# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Importing the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Importing the dataset
dataset = pd.read_csv('creditcard.csv')
x = dataset.iloc[:,1:30].values
y = dataset.iloc[:,30].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 32)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_x = StandardScaler()
x_train = sc_x.fit_transform(x_train)
x_test = sc_x.transform(x_test)
# +
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(x_train,y_train)
y_pred = classifier.predict(x_test)
# Results in form of Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
# -
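# Because credit-card fraud data is heavily imbalanced, raw accuracy can be misleading.
# A minimal sketch that unpacks the confusion matrix above into per-class counts and
# derives precision/recall for the positive (fraud) class:
tn, fp, fn, tp = cm.ravel()
precision = tp / (tp + fp)
recall = tp / (tp + fn)
print("precision = {:.4f}, recall = {:.4f}".format(precision, recall))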
from sklearn.model_selection import cross_val_score
accuracies = cross_val_score(estimator = classifier, X = x_train, y = y_train, cv = 10, n_jobs = -1)
accuracies.mean()
accuracies.std()
from mlxtend.feature_selection import SequentialFeatureSelector as sfs
# ### Selecting 5 features
# Fitting Feature Selector
sfs1 = sfs(classifier, k_features=5, verbose = 2)
sfs1 = sfs1.fit(x_train, y_train)
# Showing selected Features
feat_cols1 = list(sfs1.k_feature_idx_)
print(feat_cols1) #[10, 12, 14, 17, 25]
#Fitting in the model
classifier.fit(x_train[:,feat_cols1],y_train)
y_pred = classifier.predict(x_test[:,feat_cols1])
# Comparing and evaluating results using Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
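# As an equivalent route (a small sketch reusing the selector fitted above), mlxtend's
# SequentialFeatureSelector also exposes transform(), which selects the same columns
# as the manual feat_cols1 indexing:
x_train_sel = sfs1.transform(x_train)
x_test_sel = sfs1.transform(x_test)
classifier.fit(x_train_sel, y_train)
confusion_matrix(y_test, classifier.predict(x_test_sel))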
# ### Selecting 10 features
# Fitting Feature Selector
sfs1 = sfs(classifier, k_features=10, verbose = 2)
sfs1 = sfs1.fit(x_train, y_train)
# Showing selected Features
feat_cols2 = list(sfs1.k_feature_idx_)
print(feat_cols2) #[10, 11, 12, 14, 15, 17, 18, 23, 24, 25]
#Fitting in the models
classifier.fit(x_train[:,feat_cols2],y_train)
y_pred = classifier.predict(x_test[:,feat_cols2])
# Comparing and evaluating results using Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
# ### Selecting 15 features
# Fitting Feature Selector
sfs1 = sfs(classifier, k_features=15, verbose = 2)
sfs1 = sfs1.fit(x_train, y_train)
# Showing Features
feat_cols3 = list(sfs1.k_feature_idx_)
print(feat_cols3) # [3, 5, 8, 10, 11, 12, 14, 15, 17, 18, 21, 23, 24, 25, 28]
#Fitting in the models
classifier.fit(x_train[:,feat_cols3],y_train)
y_pred = classifier.predict(x_test[:,feat_cols3])
# Comparing and evaluating results using Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
# ### Selecting 20 features
# Fitting Feature Selector
sfs1 = sfs(classifier, k_features=20, verbose = 2)
sfs1 = sfs1.fit(x_train, y_train)
# Showing Features
feat_cols4 = list(sfs1.k_feature_idx_)
print(feat_cols4) # [2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 21, 23, 24, 25, 28]
# Fitting in the models
classifier.fit(x_train[:,feat_cols4],y_train)
y_pred = classifier.predict(x_test[:,feat_cols4])
# Comparing and evaluating results using Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
# ### Selecting 21 features
# Fitting Feature Selector
sfs1 = sfs(classifier, k_features=21, verbose = 2)
sfs1 = sfs1.fit(x_train, y_train)
# Showing Features
feat_cols5 = list(sfs1.k_feature_idx_)
print(feat_cols5) # [2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 21, 23, 24, 25, 27, 28]
# Fitting in the models
classifier.fit(x_train[:,feat_cols5],y_train)
y_pred = classifier.predict(x_test[:,feat_cols5])
# Comparing and evaluating results using Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
# ### Selecting 19 features
# Fitting Feature Selector
sfs1 = sfs(classifier, k_features=19, verbose = 2)
sfs1 = sfs1.fit(x_train, y_train)
# Showing Features
feat_cols6 = list(sfs1.k_feature_idx_)
print(feat_cols6) # [2, 3, 4, 5, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 21, 23, 24, 25, 28]
# Fitting in the models
classifier.fit(x_train[:,feat_cols6],y_train)
y_pred = classifier.predict(x_test[:,feat_cols6])
# Comparing and evaluating results using Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
# Fitting Feature Selector
sfs1 = sfs(classifier, k_features=18, verbose = 2)
sfs1 = sfs1.fit(x_train, y_train)
# Showing Features
feat_cols7 = list(sfs1.k_feature_idx_)
print(feat_cols7) # [2, 3, 5, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 21, 23, 24, 25, 28]
# Fitting in the models
classifier.fit(x_train[:,[2, 3, 5, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 21, 23, 24, 25, 28]],y_train)
y_pred = classifier.predict(x_test[:,[2, 3, 5, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 21, 23, 24, 25, 28]])
# Comparing and evaluating results using Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
from sklearn.model_selection import cross_val_score
accuracies = cross_val_score(estimator = classifier, X = x_train, y = y_train, cv = 10, n_jobs = -1)
accuracies.mean()
accuracies.std()
# Fitting Feature Selector
sfs1 = sfs(classifier, k_features=17, verbose = 2)
sfs1 = sfs1.fit(x_train, y_train)
# Showing Features
feat_cols8 = list(sfs1.k_feature_idx_)
print(feat_cols8) # [2, 3, 5, 8, 10, 11, 12, 13, 14, 15, 17, 18, 21, 23, 24, 25, 28]
# Fitting in the models
classifier.fit(x_train[:,feat_cols8],y_train)
y_pred = classifier.predict(x_test[:,feat_cols8])
# Comparing and evaluating results using Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
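# To compare the candidate subsets on an equal footing, a compact sketch (reusing the
# index lists found above) that cross-validates a fresh GaussianNB on each subset:
for k, cols in [(5, feat_cols1), (10, feat_cols2), (15, feat_cols3),
                (17, feat_cols8), (18, feat_cols7), (19, feat_cols6),
                (20, feat_cols4), (21, feat_cols5)]:
    scores = cross_val_score(estimator=GaussianNB(), X=x_train[:, cols],
                             y=y_train, cv=10, n_jobs=-1)
    print("k={}: mean accuracy {:.4f} (std {:.4f})".format(k, scores.mean(), scores.std()))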
| src/2D) Using_ScoreComparison.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Testing GalFlow convolutions against GalSim
# %pylab inline
import galsim
# ## Step 1: Get some input images from GalSim
cat = galsim.COSMOSCatalog()
gal = cat.makeGalaxy(2, gal_type='real', noise_pad_size=0)
psf = galsim.Gaussian(0.07)
# Checking psf and gal image scales
pixel_scale=0.03
# We draw the PSF image in Kspace at the correct resolution
N = 110
im_scale = 0.03
interp_factor=2
padding_factor=4
Nk = N*interp_factor*padding_factor
from galsim.bounds import _BoundsI
import tensorflow as tf
import galflow as gf
# +
bounds = _BoundsI(-Nk//2, Nk//2-1, -Nk//2, Nk//2-1)
impsf = psf.drawKImage(bounds=bounds,
scale=2.*np.pi/(N*padding_factor* im_scale))
imgal = gal.drawKImage(bounds=bounds,
scale=2.*np.pi/(N*padding_factor* im_scale))
# -
kgal = galsim.InterpolatedKImage(imgal, k_interpolant='linear')
kpsf = galsim.InterpolatedKImage(impsf, k_interpolant='linear')
imshow(log10(abs(imgal.array)), cmap='gist_stern',vmin=-10);colorbar()
tfimpsf = tf.convert_to_tensor(impsf.array, dtype=tf.complex64)
tfimgal = tf.convert_to_tensor(imgal.array, dtype=tf.complex64)
tfimpsf = tf.reshape(tfimpsf, shape=[1, 110*interp_factor*padding_factor, 110*interp_factor*padding_factor, 1])
tfimgal = tf.reshape(tfimgal, shape=[1, 110*interp_factor*padding_factor, 110*interp_factor*padding_factor, 1])
import galflow as gf
sheared_tfimgal = gf.shear(tfimgal, g1=0.3*tf.ones(1), g2=0.2*tf.ones(1))
# convolve with image
tfconv = sheared_tfimgal*tfimpsf
# +
sheared_kgal = kgal.shear(g1=0.3, g2=0.2)
sheared_kgal = galsim.Convolve(sheared_kgal, kpsf)
bounds = _BoundsI(-Nk//2, Nk//2-1, -Nk//2, Nk//2-1)
k_imkref = sheared_kgal.drawKImage(bounds=bounds,
scale=2.*np.pi/(N*padding_factor* im_scale))
# +
figure(figsize=[16,5])
subplot(131)
imshow(log10(abs(k_imkref.array)), cmap='gist_stern',vmax=1);colorbar()
subplot(132)
imshow(log10(abs(tfconv[0,:,:,0])), cmap='gist_stern',vmax=1);colorbar()
subplot(133)
imshow(log10(abs(tfconv[0,:,:,0].numpy() - k_imkref.array)), cmap='gist_stern');colorbar()
# -
tfconv.shape
tfconv.dtype
# Removing negative frequencies
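# (The hard-coded 880 below is Nk = N * interp_factor * padding_factor; keeping
# Nk//2 + 1 bins matches the half-spectrum layout expected by tf.signal.irfft2d.)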
rtfconv = tf.signal.fftshift(tfconv,axes=2)[:,:,:880//2+1,0]
# +
# # Apply kwrapping
# def k_wrapping(kimage, wrap_factor=2):
# """
# Wraps kspace image of a real image to decrease its resolution by specified
# factor
# """
# batch_size, Nkx, Nky = kimage.get_shape().as_list()
# # First wrap around the non hermitian dimension
# rkimage = kimage + tf.roll(kimage, shift=-Nkx//wrap_factor, axis=1)+ tf.roll(kimage, shift=Nkx//wrap_factor, axis=1)
# # Now take care of the hermitian part
# revrkimage = tf.roll(tf.reverse(tf.math.conj(tf.reverse(rkimage, axis=[2])), axis=[1]), shift=-Nkx//wrap_factor, axis=2)
# # These masks take care of the special case of the 0th frequency
# mask = np.ones((1, Nkx, Nky))
# mask[:,0,:]=0
# mask[:,Nkx//wrap_factor-1,:]=0
# rkimage2 = rkimage + revrkimage*mask
# mask = np.zeros((1, Nkx, Nky))
# mask[:,Nkx//wrap_factor-1,:]=1
# rkimage2 = rkimage2 + tf.roll(revrkimage,shift=-1, axis=1) *mask
# # Now that we have wrapped the image, we can truncate it to the desired size
# kimage = tf.signal.fftshift(rkimage2, axes=1)[:, :Nkx//wrap_factor, :(Nky-1)//wrap_factor+1]
# return kimage
def k_wrapping(kimage, wrap_factor=2):
"""
Wraps kspace image of a real image to decrease its resolution by specified
factor
"""
batch_size, Nkx, Nky = kimage.get_shape().as_list()
# First wrap around the non hermitian dimension
rkimage = kimage + tf.roll(kimage, shift=Nkx//wrap_factor, axis=1)
# Now take care of the hermitian part
revrkimage = tf.reverse(tf.math.conj(tf.reverse(rkimage, axis=[2])), axis=[1])
# These masks take care of the special case of the 0th frequency
mask = np.ones((1, Nkx, Nky))
mask[:,0,:]=0
mask[:,Nkx//wrap_factor-1,:]=0
rkimage2 = rkimage + revrkimage*mask
mask = np.zeros((1, Nkx, Nky))
mask[:,Nkx//wrap_factor-1,:]=1
rkimage2 = rkimage2 + tf.roll(revrkimage,shift=-1, axis=1) *mask
# Now that we have wrapped the image, we can truncate it to the desired size
kimage = rkimage2[:, :Nkx//wrap_factor, :(Nky-1)//wrap_factor+1]
return kimage
# -
rtfconv = k_wrapping(rtfconv, wrap_factor=2)
imshow(log10(abs(rtfconv[0,:,:])), cmap='gist_stern',vmin=-10);colorbar()
# Ifft
conv_images = tf.expand_dims(tf.signal.fftshift(tf.signal.irfft2d(rtfconv)),-1)
# removing padding
conv_images = tf.image.resize_with_crop_or_pad(conv_images, 110, 110)
imshow((((conv_images[0,:,:].numpy())))); colorbar()
# +
# Same thing with GalSim
#sheared_gal = gal.original_gal
sheared_gal = gal.shear(g1=0.3, g2=0.2)
sheared_gal = galsim.Convolve(sheared_gal, psf)
im_ref = sheared_gal.drawImage(nx=110, ny=110, scale=0.03,
method='no_pixel',use_true_center=False).array
# +
sheared_kgal = kgal.shear(g1=0.3, g2=0.2)
sheared_kgal = galsim.Convolve(sheared_kgal, kpsf)
im_kref = sheared_kgal.drawImage(nx=110, ny=110, scale=0.03,
method='no_pixel',use_true_center=False).array
# +
bounds = _BoundsI(-Nk//2, Nk//2-1, -Nk//2, Nk//2-1)
k_imkref = sheared_kgal.drawKImage(bounds=bounds,
scale=2.*np.pi/(N*padding_factor*im_scale),
recenter=False)
# -
figure(figsize=[15,3])
subplot(131)
title('Sheared TF galaxy')
imshow(((conv_images[0,:,:].numpy().real)))
colorbar();
subplot(132)
title('Sheared GalSim galaxy')
imshow(im_ref)
colorbar();
subplot(133)
title('Residuals')
imshow(im_ref - conv_images[0,:,:,0].numpy().real);
colorbar();
# This plot is using the same interpolation order in both TF and GalSim
figure(figsize=[15,3])
subplot(131)
title('Sheared TF galaxy')
imshow(((conv_images[0,:,:].numpy().real)))
colorbar();
subplot(132)
title('Sheared GalSim galaxy')
imshow(im_kref)
colorbar();
subplot(133)
title('Residuals')
imshow(im_kref - conv_images[0,:,:,0].numpy());
colorbar();
| notebooks/TestingFourierShear.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# PyTorch: optim
# --------------
#
# A fully-connected ReLU network with one hidden layer, trained to predict y from x
# by minimizing squared Euclidean distance.
#
# This implementation uses the nn package from PyTorch to build the network.
#
# Rather than manually updating the weights of the model as we have been doing,
# we use the optim package to define an Optimizer that will update the weights
# for us. The optim package defines many optimization algorithms that are commonly
# used for deep learning, including SGD+momentum, RMSProp, Adam, etc.
#
#
# +
import torch
from torch.autograd import Variable
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs, and wrap them in Variables.
x = Variable(torch.randn(N, D_in))
y = Variable(torch.randn(N, D_out), requires_grad=False)
# Use the nn package to define our model and loss function.
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
loss_fn = torch.nn.MSELoss(size_average=False)
# Use the optim package to define an Optimizer that will update the weights of
# the model for us. Here we will use Adam; the optim package contains many other
# optimization algorithms. The first argument to the Adam constructor tells the
# optimizer which Variables it should update.
learning_rate = 1e-4
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
for t in range(500):
# Forward pass: compute predicted y by passing x to the model.
y_pred = model(x)
# Compute and print loss.
loss = loss_fn(y_pred, y)
print(t, loss.data[0])
# Before the backward pass, use the optimizer object to zero all of the
# gradients for the variables it will update (which are the learnable
# weights of the model). This is because by default, gradients are
# accumulated in buffers (i.e., not overwritten) whenever .backward()
# is called. Check out the docs of torch.autograd.backward for more details.
optimizer.zero_grad()
# Backward pass: compute gradient of the loss with respect to model
# parameters
loss.backward()
# Calling the step function on an Optimizer makes an update to its
# parameters
optimizer.step()
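# Aside (assuming PyTorch >= 0.4): Variable has been merged into Tensor, so the wrappers
# above are no longer needed; MSELoss(size_average=False) is now MSELoss(reduction='sum');
# and the scalar loss value is read with loss.item() rather than loss.data[0].
print("final loss:", loss.item() if hasattr(loss, "item") else loss.data[0])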
| two_layer_net_optim.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/DeepLearningInterpreter/occlusion_experiments/blob/master/colab_notebooks/Visualizing_Detections_With(out)_Occlusion.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Fyk7FY6S1zB0" colab_type="text"
# ##Introduction
# + [markdown] id="mMFo33mL17L7" colab_type="text"
# The purpose of this notebook is to detect the pistol(s) on images and visualize these detections by drawing bounding boxes. Optionally, an occlusion box can be placed in the image to see how the models deal with the occlusion. The notebook is structured as follows.
#
# **First** you can choose whether you want to use SSD or Faster R-CNN.
#
# **The next few parts** are important but can be skipped or quickly glanced over. In these parts, the repository is cloned to the cloud server, the necessary imports are taken care of, the model is loaded into memory, and some helpful functions are defined.
#
# **The final part** of the notebook is the most interesting. Here, you can specify the images of interest and optionally draw a bounding box in the image. The detection visualizations will be printed.
# + [markdown] id="gFefh6g2FAT0" colab_type="text"
# ##Choose Your Model
# Choose the meta architecture that you want to use.
#
# + id="eEXTOOuo3CpB" colab_type="code" colab={}
#Set this variable equal to "SSD" or "FRCNN" (for Faster R-CNN)
meta_architecture = "SSD"
# + id="rMuRCv5AHJTJ" colab_type="code" colab={}
if meta_architecture == "SSD":
MODEL_NAME = "SSD_ext_lrCyc"
MODEL_TYPE = "SSD"
elif meta_architecture == "FRCNN":
MODEL_NAME = "FRCNN_ext_lr3"
MODEL_TYPE = "FRCNN"
else:
raise ValueError(
'The meta_architecture variable has to be set to either "SSD" or "FRCNN".'
)
# + [markdown] id="JnaC-s_3jaG6" colab_type="text"
# ---------------------------------------------------------------------------
# ##Cloning the GitHub repository to the cloud server.
#
#
# + id="1Gl-A13XLHZc" colab_type="code" outputId="d2307f2b-d9c2-441a-d98e-e221934ce87d" colab={"base_uri": "https://localhost:8080/", "height": 671}
#Downloading and installing git lfs
# !curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | sudo bash
# !sudo apt-get install git-lfs
# !git lfs install
# + id="Uh3_od2-zxdl" colab_type="code" outputId="81ea8a99-b1ff-4b32-9610-98b70d8e3322" colab={"base_uri": "https://localhost:8080/", "height": 240}
#This takes a few minutes but less than five for sure!
#Cloning repository. The exclude flag indicates that large files from the "training" and "evaluation_outcomes"
#subdirectories should not be downloaded
# !git lfs clone https://github.com/DeepLearningInterpreter/occlusion_experiments.git --exclude="occlusion_experiments/main_content/multitude_of_possible_detectors/training, occlusion_experiments/main_content/multitude_of_possible_detectors/evaluation_outcomes"
# + id="wOvyEGjO0bOT" colab_type="code" outputId="bfab3e26-97a9-4e57-c856-50f6beadf6df" colab={"base_uri": "https://localhost:8080/", "height": 34}
import os
os.chdir("/content/occlusion_experiments/TF_object_detection_API_modified")
os.chdir("object_detection")
os.getcwd()
# + [markdown] id="Pp2K7I5G1XOf" colab_type="text"
# ---------------------------------------------------------------------------------------------------------------------
# ##Imports and function definitions
# + id="efkJtoawELEh" colab_type="code" colab={}
# coding: utf-8
# # Imports
import csv
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tensorflow as tf
import time
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import scipy.misc
sys.path.append("..")
from utils import ops as utils_ops
if StrictVersion(tf.__version__) < StrictVersion('1.9.0'):
raise ImportError('Please upgrade your TensorFlow installation to v1.9.* or later!')
# + id="Srqxo2frWlcb" colab_type="code" colab={}
#makes sure there is no printing output
# %%capture
#more imports
from object_detection.utils import label_map_util
from utils import visualization_utils as vis_util
# + id="FuVNXz2b3q0T" colab_type="code" colab={}
os.chdir('/content/occlusion_experiments/main_content')
# + id="jw5TJAYTBfR4" colab_type="code" colab={}
# # Model preparation
path_to_model = 'multitude_of_possible_detectors/frozen_models_for_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_FROZEN_GRAPH = path_to_model + MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data/main_data', 'pistol_car_label_map.pbtxt')
#Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
#Loading label map
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
#Helper code
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
# + [markdown] id="b7yBcMrNqSfn" colab_type="text"
# end of imports and function definitions.
#
# -------------------------------------------------------------
# ##Begin occlusion and inference
#
# + [markdown] id="qseaxDCT542-" colab_type="text"
# Specify the index of the image of interest in this cell. The image will be printed so you can decide where to draw the occlusion box.
# + id="mOgKSaAJ5vy3" colab_type="code" outputId="a57e47e0-73d4-45b7-e007-27981e92f2fc" colab={"base_uri": "https://localhost:8080/", "height": 34}
#Specify the index of the image of interest. To see which index corresponds
#to which image, uncomment the second to next codeblock and run it.
image_index = 7
#printing image
with open('data/occlusion_images/nameAndBB.csv') as f:
reader = csv.reader(f)
image_info = list(reader)
print(image_index)
image_path = "data/occlusion_images/" + image_info[image_index][0]
image = Image.open(image_path)
image_np = load_image_into_numpy_array(image)
plt.imshow(image_np)
plt.show()
# + [markdown] id="A792c02WBLgW" colab_type="text"
# Decide the location of the occlusion box. If you do not want to occlude anything you can set the *size* variable to zero.
# + id="41w0dsqYA3dF" colab_type="code" colab={}
#The next two variables decide where the top left corner of the occlusion box
#will be. Choosing x0 = 0 and y0 = 0 means that the top left corner of the
#occlusion box will be in the top left corner of the image.
x0 = 403 #along the horizontal axis
y0 = 125 #along the vertical axis
#This parameter decides the size of the box in terms of the number of pixels.
#Set size = 0 for no occlusion
size = 80
# + [markdown] id="stndAbVuH5cJ" colab_type="text"
# With the following codeblock you can find out which image corresponds to what index.
# Just uncomment and run the code below:
# + id="QlDS7j7dGDt5" colab_type="code" colab={}
# with open('data/occlusion_images/nameAndBB.csv') as f:
# reader = csv.reader(f)
# image_info = list(reader)
# for i in range(1,30):
# print(i)
# image_path = "data/occlusion_images/" + image_info[i][0]
# image = Image.open(image_path)
# image_np = load_image_into_numpy_array(image)
# plt.imshow(image_np)
# plt.show()
# + [markdown] id="AzcfLJxW6OtU" colab_type="text"
# Run the cell below and the visualization of the detection will be printed.
# + id="dyLOXCLq7Z3Q" colab_type="code" outputId="ca765548-df77-4cbd-83aa-c55df0b651ee" colab={"base_uri": "https://localhost:8080/", "height": 311}
# %matplotlib inline
from occlusion_help_funcs.help_funcs import compute_IoU
import csv
with open('data/occlusion_images/nameAndBB.csv') as f:
reader = csv.reader(f)
image_info = list(reader)
x1 = x0 + size
y1 = y0 + size
#retrieve the ground truth bounding box
gt_box = image_info[image_index]
image_sel = gt_box[0]
gt_box = gt_box[1:5]
gt_box = [int(x) for x in gt_box]
#construct image path
PATH_TO_TEST_IMAGES_DIR = 'data/occlusion_images'
image_path = os.path.join(PATH_TO_TEST_IMAGES_DIR, image_sel)
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
#open image
image = Image.open(image_path)
image_np = load_image_into_numpy_array(image)
#make area on image grey
image_np[y0:y1, x0:x1, 0] = 163
image_np[y0:y1, x0:x1, 1] = 157
image_np[y0:y1, x0:x1, 2] = 152
#making plots nicer
plt.rcParams["axes.grid"] = False
#begin inference
with detection_graph.as_default():
with tf.Session() as sess:
#Necessary model preparation--------------
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_boxes', 'detection_scores',
'detection_classes'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
tensor_name)
#end necessary model preparation------------
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
start = time.time()
# Get handles to input and output tensors
if 'detection_masks' in tensor_dict:
# The following processing is only for single image
detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image_np.shape[0], image_np.shape[1])
detection_masks_reframed = tf.cast(
tf.greater(detection_masks_reframed, 0.5), tf.uint8)
# Follow the convention by adding back the batch dimension
tensor_dict['detection_masks'] = tf.expand_dims(
detection_masks_reframed, 0)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
# Run inference
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: np.expand_dims(image_np, 0)})
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict[
'detection_classes'][0].astype(np.uint8)
output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
output_dict['detection_scores'] = output_dict['detection_scores'][0]
if 'detection_masks' in output_dict:
output_dict['detection_masks'] = output_dict['detection_masks'][0]
end = time.time()
#import pdb; pdb.set_trace()
print(end-start)
#Convert to relative ground truth box coordinates
gt_box = [gt_box[0]/image_np.shape[0], gt_box[1]/image_np.shape[1],
gt_box[2]/image_np.shape[0], gt_box[3]/image_np.shape[1]]
#compute IoU
highest = 0.2
detected_bool = False
#checking the IoU for every detection
for k in range(output_dict['num_detections']):
pred_box = output_dict['detection_boxes'][k]
IoU = compute_IoU(gt_box, pred_box)
if IoU > highest:
highest = IoU
index = k
detected_bool = True
IoU = highest
#Visualize the ground truth box.
vis_util.draw_bounding_box_on_image_array(
image_np, gt_box[0], gt_box[1], gt_box[2], gt_box[3],
color='red', thickness=8
)
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
output_dict['detection_boxes'],
output_dict['detection_classes'],
output_dict['detection_scores'],
category_index,
instance_masks=output_dict.get('detection_masks'),
use_normalized_coordinates=True,
min_score_thresh=.25,
line_thickness=8)
fig, ax = plt.subplots(ncols=1)
im1 = ax.imshow(image_np)
ax.set_ylabel('')
if IoU > .2:
ax.set_xlabel("IoU = {:.2f}".format(IoU))
# Turn off tick labels
ax.set_yticklabels([])
ax.set_xticklabels([])
plt.show()
if IoU > .2:
print("The intersection over union is: ", IoU)
print("Confidence is: ", output_dict['detection_scores'][index])
# + id="1Em3812E4rAQ" colab_type="code" colab={}
| colab_notebooks/Visualizing_Detections_With(out)_Occlusion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Import Dependencies
import pandas as pd
import numpy as np
from pandas import Series, DataFrame
from sklearn.linear_model import LogisticRegression
# Import the data
bank_df = pd.read_csv('./data/bank_full_w_dummy_vars.csv')
bank_df.head()
for col in bank_df:
print(col)
print(bank_df[col].unique())
bank_df = bank_df.dropna()
bank_df.shape
bank_df = bank_df[bank_df.education != "unknown"]
bank_df.shape
bank_df = bank_df[bank_df.job != "unknown"]
bank_df.shape
bank_df['y'].value_counts()
success = bank_df[bank_df.y == "yes"]
success.shape
fail = bank_df[bank_df.y != "yes"]
fail = fail.sample(n=5021)
fail.shape
bank_df = pd.concat([success, fail])
bank_df
for col in bank_df:
print(col)
print(bank_df[col].unique())
test_df = bank_df[["y", 'age', 'marital', 'job', 'loan', 'education']]
test_df
test_df['job'] = test_df['job'].replace(["management", 'technician', 'entrepreneur',
'blue-collar', 'admin.', 'services', 'self-employed', 'housemaid'],
"employed")
for col in test_df:
print(col)
print(test_df[col].unique())
test_df = pd.get_dummies(test_df)
test_df.head()
X = test_df.iloc[:, [0, 3, 4, 6, 7, 8, 11, 12, 13]].values  # .ix is removed in modern pandas; .iloc selects by position
y = test_df.iloc[:,1].values
# Fitting the data with logistic regression model
logReg = LogisticRegression()
clf = logReg.fit(X, y)
# Predict with input data
new_user = [[30, 0, 0, 0, 1, 0, 1, 0, 1]]
y_pred = logReg.predict(new_user)
y_pred
# Predict the probability
new_user = [[40, 0, 0, 0, 0, 1, 0, 1, 0]]
clf.predict_proba(new_user)
clf.score(X, y)
# Predict the probability
new_user = [[50, 0, 1, 1, 0, 0, 0, 0, 0]]
clf.predict_proba(new_user)
clf.coef_
# Predicting the test set results
y_hat = logReg.predict(X)
# Create confusion matrix
from sklearn.metrics import confusion_matrix
confusion_matrix(y, y_hat, labels=[0, 1])
# ### Package the Model into a Pickle Object
# Import pickle to package the model
import pickle
# Saving model to disk
pickle.dump(logReg, open("./model/reg_model.pkl", "wb"))
#Loading model to compare the results
model = pickle.load(open("./model/reg_model.pkl", "rb"))
print(model.predict_proba([[55, 0, 1, 0, 0, 0, 1, 0, 1]]))
# Check the model score
model.score(X, y)
| Web_Application_Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Getting started with TensorFlow (Eager Mode)
#
# **Learning Objectives**
# - Understand the difference between TensorFlow's two modes: Eager Execution and Graph Execution
# - Practice defining and performing basic operations on constant Tensors
# - Use Tensorflow's automatic differentiation capability
#
# ## Introduction
#
# **Eager Execution**
#
# Eager mode evaluates operations immediately and returns concrete values right away. To enable eager mode, simply place `tf.enable_eager_execution()` at the top of your code. We recommend using eager execution when prototyping, as it is intuitive, easier to debug, and requires less boilerplate code.
#
# **Graph Execution**
#
# Graph mode is TensorFlow's default execution mode (although it will change to eager with TF 2.0). In graph mode, operations only build a symbolic graph, which doesn't get executed until it is run within the context of a tf.Session(). This style of coding is less intuitive and has more boilerplate; however, it can lead to performance optimizations and is particularly suited to distributing training across multiple devices. We recommend using delayed (graph) execution for performance-sensitive production code.
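#
# For contrast, a minimal graph-mode sketch (not executed in this notebook, since eager
# execution is enabled below and must be turned on before any ops are created):
#
# ```python
# g = tf.Graph()
# with g.as_default():
#     a = tf.constant([5, 3, 8])
#     b = tf.constant([3, -1, 2])
#     c = tf.add(a, b)           # symbolic node only; no value yet
# with tf.Session(graph=g) as sess:
#     print(sess.run(c))         # values are produced only when run in a session
# ```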
import tensorflow as tf
print(tf.__version__)
# ## Eager Execution
tf.enable_eager_execution()
# ### Adding Two Tensors
#
# The value of the tensor, as well as its shape and data type are printed
a = tf.constant(value = [5, 3, 8], dtype = tf.int32)
b = tf.constant(value = [3, -1, 2], dtype = tf.int32)
c = tf.add(x = a, y = b)
print(c)
# #### Overloaded Operators
# We can also perform a `tf.add()` using the `+` operator. The `/,-,*` and `**` operators are similarly overloaded with the appropriate tensorflow operation.
c = a + b # this is equivalent to tf.add(a,b)
print(c)
# ### NumPy Interoperability
#
# In addition to native TF tensors, tensorflow operations can take native python types and NumPy arrays as operands.
# +
import numpy as np
a_py = [1,2] # native python list
b_py = [3,4] # native python list
a_np = np.array(object = [1,2]) # numpy array
b_np = np.array(object = [3,4]) # numpy array
a_tf = tf.constant(value = [1,2], dtype = tf.int32) # native TF tensor
b_tf = tf.constant(value = [3,4], dtype = tf.int32) # native TF tensor
for result in [tf.add(x = a_py, y = b_py), tf.add(x = a_np, y = b_np), tf.add(x = a_tf, y = b_tf)]:
print("Type: {}, Value: {}".format(type(result), result))
# -
# You can convert a native TF tensor to a NumPy array using .numpy()
a_tf.numpy()
# ### Linear Regression
#
# Now let's use low level tensorflow operations to implement linear regression.
#
# Later in the course you'll see abstracted ways to do this using high level TensorFlow.
# #### Toy Dataset
#
# We'll model the following function:
#
# \begin{equation}
# y= 2x + 10
# \end{equation}
X = tf.constant(value = [1,2,3,4,5,6,7,8,9,10], dtype = tf.float32)
Y = 2 * X + 10
print("X:{}".format(X))
print("Y:{}".format(Y))
# #### Loss Function
#
# Using mean squared error, our loss function is:
# \begin{equation}
# MSE = \frac{1}{m}\sum_{i=1}^{m}(\hat{Y}_i-Y_i)^2
# \end{equation}
#
# $\hat{Y}$ represents the vector containing our model's predictions:
# \begin{equation}
# \hat{Y} = w_0X + w_1
# \end{equation}
def loss_mse(X, Y, w0, w1):
Y_hat = w0 * X + w1
return tf.reduce_mean(input_tensor = (Y_hat - Y)**2)
# #### Gradient Function
#
# To use gradient descent we need to take the partial derivative of the loss function with respect to each of the weights. We could manually compute the derivatives, but with Tensorflow's automatic differentiation capabilities we don't have to!
#
# During gradient descent we think of the loss as a function of the parameters $w_0$ and $w_1$. Thus, we want to compute the partial derivative with respect to these variables. The `params=[2,3]` argument tells TensorFlow to only compute derivatives with respect to the 2nd and 3rd arguments to the loss function (counting from 0, so really the 3rd and 4th).
# Counting from 0, the 2nd and 3rd parameter to the loss function are our weights
grad_f = tf.contrib.eager.gradients_function(f = loss_mse, params=[2,3])
# #### Training Loop
#
# Here we have a very simple training loop that converges. Note we are ignoring best practices like batching, creating a separate test set, and random weight initialization for the sake of simplicity.
# +
STEPS = 1000
LEARNING_RATE = .02
# Initialize weights
w0 = tf.constant(value = 0.0, dtype = tf.float32)
w1 = tf.constant(value = 0.0, dtype = tf.float32)
for step in range(STEPS):
#1. Calculate gradients
d_w0, d_w1 = grad_f(X, Y, w0, w1)
#2. Update weights
w0 = w0 - d_w0 * LEARNING_RATE
w1 = w1 - d_w1 * LEARNING_RATE
#3. Periodically print MSE
if step % 100 == 0:
print("STEP: {} MSE: {}".format(step, loss_mse(X, Y, w0, w1)))
# Print final MSE and weights
print("STEP: {} MSE: {}".format(STEPS,loss_mse(X, Y, w0, w1)))
print("w0:{}".format(round(float(w0), 4)))
print("w1:{}".format(round(float(w1), 4)))
# -
# ## Bonus
# Try modelling a non-linear function such as: $y=xe^{-x^2}$
# +
X = tf.constant(value = np.linspace(0,2,1000), dtype = tf.float32)
Y = X * np.exp(-X**2)
from matplotlib import pyplot as plt
# %matplotlib inline
plt.plot(X, Y)
# +
def make_features(X):
features = [X]
features.append(tf.ones_like(X)) # Bias.
features.append(tf.square(X))
features.append(tf.sqrt(X))
features.append(tf.exp(X))
return tf.stack(features, axis=1)
def make_weights(n_weights):
W = [tf.constant(value = 0.0, dtype = tf.float32) for _ in range(n_weights)]
return tf.expand_dims(tf.stack(W),-1)
def predict(X, W):
Y_hat = tf.matmul(X, W)
return tf.squeeze(Y_hat, axis=-1)
def loss_mse(X, Y, W):
Y_hat = predict(X, W)
return tf.reduce_mean(input_tensor = (Y_hat - Y)**2)
X = tf.constant(value = np.linspace(0,2,1000), dtype = tf.float32)
Y = np.exp(-X**2) * X
grad_f = tf.contrib.eager.gradients_function(f = loss_mse, params=[2])
# +
STEPS = 2000
LEARNING_RATE = .02
# Weights/features.
Xf = make_features(X)
# Xf = Xf[:,0:2] # Linear features only.
W = make_weights(Xf.get_shape()[1].value)
# For plotting
steps = []
losses = []
plt.figure()
for step in range(STEPS):
#1. Calculate gradients
dW = grad_f(Xf, Y, W)[0]
#2. Update weights
W -= dW * LEARNING_RATE
#3. Periodically print MSE
if step % 100 == 0:
loss = loss_mse(Xf, Y, W)
steps.append(step)
losses.append(loss)
plt.clf()
plt.plot(steps, losses)
# Print final MSE and weights
print("STEP: {} MSE: {}".format(STEPS,loss_mse(Xf, Y, W)))
# Plot results
plt.figure()
plt.plot(X, Y, label='actual')
plt.plot(X, predict(Xf, W), label='predicted')
plt.legend()
# -
# Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| courses/machine_learning/deepdive/02_tensorflow/a_tfstart_eager.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <font color='darkblue'>What is a function?</font>
# <b>A function is generally known as a mathematical object, although the concept is also ubiquitous in everyday life</b>. Unfortunately, in everyday life, we often confuse functions and effects. And what is even more unfortunate is that we also make this mistake when working with many programming languages.
#
# ## <font color='darkgreen'>Functions in the real world</font>
# In the real world, a function is primarily a mathematic concept. It’s a relation between a source set, called the function domain, to a target set, called the function codomain. The domain and the codomain need not be distinct. A function can have the same set of integer numbers for its domain and its codomain, for example.
#
# <b><font size='3'>What makes a relation between two sets a function</font></b><br/>
# To be a function, a relation must fulfill one condition: all elements of the domain must have one and only one corresponding element in the codomain, as shown in below figure:
# <img src='https://2.bp.blogspot.com/-oSfVsuKVXcE/Wfm5yJnJhYI/AAAAAAAAXCA/Gq5Q_jjdK9wik1LJjYoy3f1ALYomLrcYgCLcBGAs/s1600/4070_f2-1.PNG'/><br/>
#
# This has some interesting implications:
# * There cannot exist elements in the domain with no corresponding value in the codomain.
# * There cannot exist two elements in the codomain corresponding to the same element of the domain.
# * There may be elements in the codomain with no corresponding element in the source set.
# * There may be elements in the codomain with more than one corresponding element in the source set.
# * The set of elements of the codomain that have a corresponding element in the domain is called the image of the function.
#
# <b><font size='3'>Partial functions</font></b><br/>
# A relation that isn’t defined for all elements of the domain but that fulfills the rest of the requirement (<font color='brown'>no element of the domain can have a relationship with more than one element of the codomain</font>) is often called a <b><font color='darkblue'>partial function</font></b>. The relation predecessor(x) is a partial function on <i>N</i> (<font color='brown'>the set of positive integers plus 0</font>), but it’s a total function on N*, which is the set of positive integers without 0, and its codomain is N.
#
# Partial functions are important in programming because many bugs are the result of using a partial function as if it were a total one. For example, the relation <font color='blue'>f(x) = 1/x</font> is a partial function from N to Q (<font color='brown'>the rational numbers</font>) because it isn’t defined for 0. It’s a total function from N* to Q, but it’s also a total function from N to (<font color='brown'>Q plus error</font>). <b>By adding an element to the codomain</b> (<font color='brown'>the error condition</font>)<b>, you can transform the partial function into a total one.</b> But to do this, the function needs a way to return an error. Can you see an analogy with computer programs? You’ll see that turning partial functions into total ones is an important part of functional programming.
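#
# A small Python sketch of this idea (the names are illustrative, not from any library):
# the partial function 1/x becomes total by widening the codomain with an explicit error
# value that is returned rather than thrown.
# ```python
# def inverse(x):
#     """Total function from numbers to (number | error message)."""
#     if x == 0:
#         return "error: division by zero"   # the error is returned, not raised
#     return 1 / x
# ```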
#
# <b><font size='3'>Function composition </font></b><br/>
# Functions are building blocks that can be composed to build other functions. The composition of functions <i>f</i> and <i>g</i> is noted as <b><font color='blue'>f ˚ g</font></b>, which reads as <b>f round g</b>. If f(x) = x + 2 and g(x) = x * 2, then:
# ```python
# f ˚ g (x) = f(g(x)) = f(x * 2) = (x * 2) + 2
# ```
#
# Note that the two notations f ˚ g (x) and f(g(x)) are equivalent. But writing a composition as f(g(x)) implies using x as a placeholder for the argument. Using the f ˚ g notation, you can express a function composition without using this placeholder.
#
# If you apply this function to 5, you’ll get the following:
# ```python
# f ˚ g (5) = f(g(5)) = f(5 * 2) = 10 + 2 = 12
# ```
#
# It’s interesting to note that f ˚ g is generally different from g ˚ f, although they may sometimes be equivalent. For example:
# ```python
# g ˚ f (5) = g(f(5)) = g(5 + 2) = 7 * 2 = 14
# ```
#
# <b><font size='3'>Functions of several arguments</font></b><br/>
# So far, we’ve talked only about functions of one argument. What about functions of several arguments? Simply said, there’s no such thing as a function of several arguments. Remember the definition? <b>A function is a relation between a source set and a target set. It isn’t a relation between two or more source sets and a target set. A function can’t have several arguments. But the product of two sets is itself a set, so a function from such a product of sets into a set may appear to be a function of several arguments</b>. Let’s consider the following function:
# ```python
# f(x, y) = x + y
# ```
# This may be a relation between N x N and N, in which case, it’s a function. But it has only one argument, which is an element of N x N. N x N is the set of all possible pairs of integers. An element of this set is a pair of integers, and a pair is a special case of the more general tuple concept used to represent combinations of several elements. A pair is a tuple of two elements.
# Tuples are noted between parentheses, so (3, 5) is a tuple and an element of N x N. The function <i>f</i> can be applied to this tuple:
# ```python
# f((3, 5)) = 3 + 5 = 8
# ```
#
# In such a case, you may, by convention, simplify writing by removing one set of parentheses:
# ```python
# f(3, 5) = 3 + 5 = 8
# ```
#
# Nevertheless, it’s still a function of one tuple, and not a function of two arguments.
#
# <b><font size='3'>Function currying</font></b><br/>
# Functions of tuples can be thought of differently. The function <font color='blue'>f(3, 5)</font> might be considered as a function from N to a set of functions of N. So the previous example could be rewritten as:<br/>
# ```python
# f(x)(y) = g(y)
# ```
# where
# ```python
# g(y) = x + y
# ```
# In such a case, you can write
# ```python
# f(x) = g
# ```
# which means that the result of applying the function <i>f</i> to the argument x is a new function <i>g</i>. Applying this <i>g</i> function to y gives the following:
# ```python
# g(y) = x + y
# ```
# When applying <i>g</i>, x is no longer a variable. It doesn’t depend on the argument or on anything else. It’s a constant. If you apply this to (3, 5), you get the following:
# ```python
# f(3)(5) = g(5) = 3 + 5 = 8
# ```
# <b>The only new thing here is that the codomain of <i>f</i> is a set of functions instead of a set of numbers. The result of applying f to an integer is a function. The result of applying this function to an integer is an integer.</b> <font color='blue'>f(x)(y)</font> is the curried form of the function <font color='blue'>f(x, y)</font>. Applying this transformation to a function of a tuple (<font color='brown'>which you can call a function of several arguments if you prefer</font>) is called <font color='darkblue'><b>currying</b></font>, after the mathematician Haskell Curry (<font color='brown'>although he wasn’t the inventor of this transformation</font>).
#
# <b><font size='3'>Partially applied functions</font></b><br/>
# The curried form of the addition function may not seem natural, and you might wonder if it corresponds to something in the real world. After all, <b>with the curried version, you’re considering both arguments separately. One of the arguments is considered first, and applying the function to it gives you a new function</b>. Is this new function useful by itself, or is it simply a step in the global calculation?
#
# In the case of an addition, it doesn’t seem useful. And by the way, you could start with either of the two arguments and it would make no difference. The intermediate function would be different, but not the end result. Now consider a new function of a pair of values:
# ```python
# f(rate, price) = price / 100 * (100 + rate)
# ```
#
# That function seems to be equivalent to this:
# ```python
# g(price, rate) = price / 100 * (100 + rate)
# ```
#
# Let’s now consider the curried versions of these two functions:
# ```python
# f(rate)(price)
# g(price)(rate)
# ```
#
# You know that <i>f</i> and <i>g</i> are functions. But what are <font color='blue'>f(rate)</font> and <font color='blue'>g(price)</font>? Yes, for sure, they’re the results of applying <i>f</i> to rate and <i>g</i> to price. But what are the types of these results? <font color='blue'>f(rate)</font> is a function of a price to a price. If rate = 9, this function applies a tax of 9% to a price, giving a new price. You could call the resulting function <font color='blue'>apply9-percentTax(price)</font>, and it would probably be a useful tool because the tax rate doesn’t change often.
#
# On the other hand, <font color='blue'>g(price)</font> is a function of a rate to a price. If the price is $100, it gives a new function applying a price of $100 to a variable tax. What could you call this function? If you can’t think of a meaningful name, that usually means that it’s useless, though this depends on the problem you have to solve. Functions like <font color='blue'>f(rate)</font> and <font color='blue'>g(price)</font> are sometimes called <font color='darkblue'><b>partially applied functions</b></font>, in reference to the forms <font color='blue'>f(rate, price)</font> and <font color='blue'>g(price, rate)</font>. <b>Partially applying functions can have huge consequences regarding argument evaluation.</b> We’ll come back to this subject in a later section.
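#
# The same idea in Python (illustrative names): partially applying the rate first yields
# a reusable tax function, whereas fixing the price first rarely deserves a name.
# ```python
# def f(rate):
#     def apply_tax(price):
#         return price / 100 * (100 + rate)
#     return apply_tax
#
# apply_9_percent_tax = f(9)
# apply_9_percent_tax(100)     # == 109.0
# ```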
#
# <b><font size='3'>Functions have no effects</font></b><br/>
# Remember that pure functions only return a value and do nothing else. They don’t mutate any element of the outside world (<font color='brown'>with outside being relative to the function itself</font>), they don’t mutate their arguments, and they don’t explode (<font color='brown'>or throw an exception, or anything else</font>) <b>if an error occurs. They can return an exception or anything else, such as an error message. But they must return it, not throw it, nor log it, nor print it.</b>
#
# # <font color='darkblue'>Functions in Python </font>
#
# ## <font color='darkgreen'>Functional methods </font>
# A method can be functional if it respects the <b>requirements of a pure function</b> (see the sketch after this list):
# * It must not mutate anything outside the function. No internal mutation may be visible from the outside.
# * It must not mutate its argument.
# * It must not throw errors or exceptions.
# * It must always return a value.
# * When called with the same argument, it must always return the same result.
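#
# A short sketch contrasting the two (purely illustrative):
# ```python
# counter = 0
#
# def impure_add(x, y):
#     global counter
#     counter += 1            # mutates state outside the function
#     print(x + y)            # observable effect
#     return x + y
#
# def pure_add(x, y):
#     return x + y            # only returns a value; same argument, same result
# ```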
#
# ## <font color='darkgreen'>Composing functions</font>
# If you think about functions as methods, composing them seems simple:
# +
def square(x):
return x * x
def triple(x):
return x * 3
print("Composing example: square(triple(2)) = {}".format(square(triple(2))))
# -
# But this isn’t function composition. In this example, you’re composing function applications. <b><a href='https://en.wikipedia.org/wiki/Function_composition'>Function composition</a> is a binary operation on functions, just as addition is a binary operation on numbers</b>. So you can compose functions programmatically, using a method:
# +
def compose_ex(f1, f2):
def cmp(x):
return f1(f2(x))
return cmp
print("compose_ex(square, triple)(2)={}".format(compose_ex(square, triple)(2)))
# -
# Now you can start seeing how powerful this concept is!
#
# <b><font size='3'>Problem with function compositions</font></b><br/>
# <b>In imperative programming, each function is evaluated before the result is passed as the input of the next function. But in functional programming, composing functions means building the resulting function without evaluating anything.</b> Composing functions is powerful because functions can be composed without being evaluated. But as a consequence, <font color='red'>applying the composed function results in numerous embedded method calls that will eventually overflow the stack</font>. This can be demonstrated with a simple example (<font color='brown'>using lambdas, which will be introduced in the next section</font>):
# +
def addOne(x):
return x + 1
addN = compose_ex(addOne, addOne)
fnum = 1000
for i in range(fnum):
addN = compose_ex(addN, addOne)
print('addN(1) = {}'.format(addN(1)))
# -
# This program will overflow the stack when fnum is around 1000. Hopefully you won’t usually compose several thousand functions, but you should be aware of this.
#
# # <font color='darkblue'>Advanced function features</font>
# You’ve seen how to create apply and compose functions. You’ve also learned that functions can be represented by methods or by objects. But you haven’t answered a fundamental question: why do you need function objects? Couldn’t you simply use methods? Before answering this question, you have to consider the problem of the functional representation of multiargument methods.
#
# ## <font color='darkgreen'>Applying curried functions</font>
# You’ve seen how to write curried function types and how to implement them. But how do you apply them? Well, just like any function. You apply the function to the first argument, and then apply the result to the next argument, and so on until the last one. For example, you can apply the add function to 3 and 5:
# +
import functools
def addTwo(x, y):
return x + y
addTenWith = functools.partial(addTwo, x = 10)
print("addTenWith(5) = {}".format(addTenWith(y=5)))
# -
# Here, we leverage package <b><a href='https://docs.python.org/2/library/functools.html'>functools</a></b> to carry out the curring operation. It would be great if you could apply a function just by writing its name followed by its argument. It would allow coding, as in Scala:
# ```scala
# addTwo(10)(5)
# ```
#
# ## <font color='darkgreen'>Higher-order functions</font>
# We wrote a method to compose functions before. That method was a functional one, taking as its argument a tuple of two functions and returning a function. But instead of using a method, you could use a function! This special kind of function, <b>taking functions as its arguments and returning functions, is called a <font color='darkblue'>higher-order function</font></b> (<font color='brown'>HOF</font>).
#
# Below we introduce higher-order function <font color='blue'>compose2</font> to compose <font color='blue'>f(g(x))</font>; and <font color='blue'>andThen</font> to compose the opposite direction (<font color='brown'>f=square; g=triple</font>):
# +
from fpu.fp import compose2, andThen
triple_and_square = compose2(square, triple)
square_andThen_triple = andThen(square, triple)
print("triple_and_square(2) = {}".format(triple_and_square(2))) # (2 * 3)^2 = 36
print("square_andThen_triple(2) = {}".format(square_andThen_triple(2))) # 2^2 * 3 = 12
# -
# ## <font color='darkgreen'>Closures</font>
# You’ve seen that pure functions must not depend on anything other than their arguments to evaluate their return values. Methods may even access static members of other classes. I’ve said that functional methods are methods that respect referential transparency, which means they have no observable effects besides returning a value. The same is true for functions. <b>Functions are pure if they don’t have observable side effects.</b>
#
# <b><a href='https://en.wikipedia.org/wiki/Closure_(computer_programming)'>Closures</a> are compatible with pure functions if you consider them as additional implicit arguments.</b> One simple example as below:
# +
def addN(n):
def add(x):
return x + n
return add
add5 = addN(5)
print("add5(10) = {}".format(add5(10)))
# -
# The method <font color='blue'>addN</font> will return a closure binding with a free variable <i>n</i>. From above example, <font color='blue'>add5</font> is binding with free variable <font color='blue'>n=5</font>. For more about Python closure, you can refer to this post: <a href='http://www.codedata.com.tw/java/understanding-lambda-closure-3-python-support/'>認識 Lambda/Closure(3)Python 對 Lambda/Closure 的支援</a>.
| Ch2_UsingFunction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table width = "100%">
# <tr style="background-color:white;">
# <!-- QWorld Logo -->
# <td style="text-align:left;width:200px;">
# <a href="https://qworld.net/" target="_blank"><img src="../images/QWorld.png"> </a></td>
# <td style="text-align:right;vertical-align:bottom;font-size:16px;">
# Prepared by <a href="https://gitlab.com/sabahuddin.ahmad" target="_blank"> <NAME> </a></td>
# </tr>
# </table>
#
# <hr>
# ## Examples for BQM Formulation
# In this notebook, we will learn how to formulate BQM for the Travelling Salesman problem.
# ### Travelling Salesman Problem (TSP)
# Recall, given a set of cities and corresponding distances between each pair of cities, the problem is to find the shortest possible route such that a salesman visits every city exactly once and returns to the starting point / hometown. So, eventually, the salesman maximizes his sales by minimizing the total cost of travelling between the nodes.
#
# The variable $x_{ip}$ is 1 if the salesman visits node $i$ at position $p$ in the route, and 0 otherwise.
# $$x_{ip}=
# \left\{
# \begin{array}{ll}
# 1, & \text{node i is at position p in the route} \\
# 0, & \text{otherwise} \\
# \end{array}
# \right.$$
#
# #### Objective Function
#
# The QUBO Objective function, including the constraints is,
# $$C(x) = A\sum_{(i,j) \in E} w_{ij} \sum_{p=1}^{N} x_{i,p} x_{j,p+1} + B\sum_{p=1}^{N} \left(1-\sum_{i=1}^{N}x_{i,p}\right)^2 + B\sum_{i=1}^{N} \left(1-\sum_{p=1}^{N}x_{i,p}\right)^2 + B\sum_{(i,j) \notin E} \sum_{p=1}^{N} x_{i,p} x_{j,p+1}$$
#
# Here, $A$ weights the distance term and should be small enough that shortening the route can never compensate for violating a constraint, whereas $B$ is a positive penalty parameter that must be large enough for the constraints to be enforced. One possible choice is $0 < A\max(w_{ij}) < B$ (assuming that $w_{ij} \geq 0$ for each $(i,j) \in E$).
#
#
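# A compact sketch of how this objective can be turned into a QUBO dictionary in Python.
# The 4-city weight matrix, the (node, position) variable encoding, and the choice
# A = 1, B = A*max(w) + 1 are illustrative assumptions; because the example graph is
# complete, the non-edge penalty term vanishes here.
# +
import itertools
import numpy as np
# Illustrative 4-city instance with a complete, symmetric weight matrix.
w = np.array([[0, 3, 1, 7],
              [3, 0, 5, 2],
              [1, 5, 0, 4],
              [7, 2, 4, 0]])
N = len(w)
A = 1
B = A * w.max() + 1           # satisfies 0 < A*max(w_ij) < B
Q = {}                        # QUBO: keys are pairs of (node, position) variables
def add(u, v, bias):
    # Accumulate bias on the (u, v) entry; diagonal entries act as linear biases.
    key = (u, v) if u <= v else (v, u)
    Q[key] = Q.get(key, 0) + bias
# Distance term: A * w[i, j] * x_{i,p} * x_{j,p+1}, with a cyclic route.
for i, j in itertools.permutations(range(N), 2):
    for p in range(N):
        add((i, p), (j, (p + 1) % N), A * w[i, j])
# "One node per position": B * (1 - sum_i x_{i,p})^2 expands (using x^2 = x) into a
# -B linear bias per variable and +2B per pair of variables sharing a position.
for p in range(N):
    for i in range(N):
        add((i, p), (i, p), -B)
    for i, j in itertools.combinations(range(N), 2):
        add((i, p), (j, p), 2 * B)
# "Each node visited exactly once": the symmetric expansion over positions.
for i in range(N):
    for p in range(N):
        add((i, p), (i, p), -B)
    for p, q in itertools.combinations(range(N), 2):
        add((i, p), (i, q), 2 * B)
# -
# The resulting dictionary (with linear biases on the diagonal) can then be handed to a
# QUBO sampler, e.g. dimod's `sample_qubo`.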
# ### References
# ***
| notebooks/BQM_Examples_TSP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MLG: Lab 5 (Part 2)
#
# ## Exercise 3: Self-organized representation of a collection of images
#
# ### Dataset information
#
# The Wang image database is a database of images grouped by class. In this dataset, we downloaded only the 1000 images. You can download them on this website: http://wang.ist.psu.edu/docs/home.shtml more precisely at this address: http://wang.ist.psu.edu/~jwang/test1.tar
#
# For each class we have 100 corresponding images:
# - 0-100: Africans
# - 100-200: Beaches
# - 200-300: Monuments
# - etc...
#
# All the images in this dataset are 250x166 pixels or 166x250 pixels.
import numpy as np
import matplotlib.pylab as pl
import KohonenUtils
import WangImageUtilities
# %matplotlib inline
extractor = WangImageUtilities.ImageFeatureExtractor('Wang_Data')
# Here we load images from 300-399 and from 500-599
extractor.load_images(list_indices=list(np.arange(300, 400)) + list(np.arange(500, 600)))
# ### Dataset Visualization
pl.figure(figsize=(10, 20))
pl.subplot(121)
pl.imshow(extractor.images[2])
pl.axis('off')
pl.subplot(122)
pl.imshow(extractor.images[101])
_ = pl.axis('off')
# ### Clustering with SOM
method = 1
if method == 1:
histograms = extractor.extract_histogram()
elif method == 2:
histograms = extractor.extract_hue_histogram()
elif method == 3:
histograms = extractor.extract_color_histogram()
else:
print('Implement your own method for extracting features if you like!')
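# As a rough illustration only (not the actual implementation inside `WangImageUtilities`), a per-channel color histogram feature for a single image could be computed as follows, assuming the loaded images are 8-bit RGB arrays:
# +
def simple_color_histogram(image, bins=16):
    """Concatenate normalized per-channel histograms of an RGB image into one feature vector."""
    image = np.asarray(image)
    channels = []
    for c in range(3):
        hist, _ = np.histogram(image[:, :, c], bins=bins, range=(0, 255))
        channels.append(hist / hist.sum())  # normalize so images of different sizes are comparable
    return np.concatenate(channels)

simple_color_histogram(extractor.images[0]).shape  # 3 channels x 16 bins = 48 features
# -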
# +
kmap = KohonenUtils.KohonenMap(side_rows=8,
side_cols=8,
size_vector=histograms.shape[1])
n_iter = 5
learning_rate = KohonenUtils.ExponentialTimeseries(1, 0.05, n_iter*histograms.shape[0])
neighborhood_size = KohonenUtils.ExponentialTimeseries(2./3 * kmap._map.shape[1], 1, n_iter*histograms.shape[0])
names = []
for index in extractor.image_indices:
names.append(str(index))
kmap.train(histograms, names, n_iter, learning_rate, neighborhood_size)
# -
kmap.plot_umatrix(plot_empty=True, plot_names=True)
# To simplify analysis, we can write the images to HTML. This is easier to see and analyze.
# Writes a 'som.html' file
# You can visualize the results and click on a neuron to see other images assigned to this neuron
extractor.to_html('som.html', kmap)
# <h3>REPORT (date of submission: 27.5 before 23:55)</h3>
#
# 1. Explain the three different methods we provided for extracting features. What do you understand about them (input, output), and how do they work?</p>
#
# 2. Try the SOM with several (minimum 3) different sets of images (always 100 images per class and at least two classes). You can change the size of the Self-Organizing Map as well as its parameters.
# <ul>
# <li> Note that we provided three methods for extracting features: for at least one of the test you do try with all three methods and compare the results.
# <li> Include for each experiment a U-Matrix (with images - screenshot of the html) that you find interesting. Explain why you find it interesting (what the input images are, with which features you trained your Self-Organizing Map, with which parameters, and how this is reflected in the results)...
# </ul>
# </p>
| src/SOM_part2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import xml.etree.ElementTree as ET
import pandas as pd
# # Create CSV with truck data
# +
tree = ET.parse('C:/Users/dazac/Downloads/datos mina/datos produccion/camiones.xml')
root = tree.getroot()
trucks = root.findall('Truck')
ids = []
max_vel = []
pos = []
stor = []
curr = []
disch = []
loade = []
spott = []
for truck in trucks:
ids.append(truck.get('ID').replace('-','_'))
max_vel.append(truck.find('maximumPossibleVelocity').text)
pos.append(truck.find('positionedAt').get('resource'))
stor.append(truck.find('storageCapacity').text)
curr.append(truck.find('currentLoad').text)
disch.append(truck.find('dischargeRate').text)
loade.append(truck.find('loadedSpeed').text)
spott.append(truck.find('spottingTime').text)
datos = {
'ID': ids,
'VelocidadMaxima': max_vel,
'Posicion': pos,
'Capacidad':stor,
'CargaActual': curr,
'Descarga': disch,
'VelocidadCargado':loade,
'Spotting':spott
}
df_trucks = pd.DataFrame(datos)
df_trucks.to_csv(path_or_buf='C:/Users/dazac/Downloads/datos mina/datos produccion/camiones_info.csv', sep=',', header=True, index=False)
# -
# # Create CSV with truck locations/properties
# +
df_camiones_info = pd.read_csv('C:/Users/dazac/Downloads/datos mina/datos produccion/camiones_info.csv', delimiter=',')
df_cargas = pd.read_csv('C:/Users/dazac/Downloads/datos mina/Cargas.csv', delimiter=';')
df_descargas = pd.read_csv('C:/Users/dazac/Downloads/datos mina/Descargas.csv', delimiter=';')
object_type = []
name = []
x = []
y = []
z = []
lenght = []
width = []
height = []
path = []
prop = []
value = []
other = []
def adaptar_nombres(nombres):
nombres = nombres.replace('-','_').replace('.','')
return nombres
def modifica_nombres(nombre):
if nombre in df_camiones_info['Posicion'].values:
nombre = 'Input@'+nombre
else:
nombre = 'Output@'+nombre
return nombre
# +
for truck_row in range(len(df_trucks)):
    # get the starting point
punto_inicial = df_camiones_info[ df_camiones_info.ID == df_trucks.ID[truck_row] ].Posicion.values[0]
punto_inicial = modifica_nombres(punto_inicial)
if punto_inicial == 'RS1_STOCKCR04_SME':
punto_inicial = 'RF10A_4390_1025_1'
velocidad_maxima = df_camiones_info[ df_camiones_info.ID == df_trucks.ID[truck_row] ].VelocidadMaxima.values[0]
object_type.append('Camion')
name.append(df_trucks.ID.iloc[truck_row])
x.append('0')
y.append('0')
z.append('0')
lenght.append('0,5')
width.append('0,5')
height.append('0,25')
path.append('0')
prop.append('PuntoInicial')
value.append(punto_inicial)
other.append('None')
object_type.append('Camion')
name.append(df_trucks.ID.iloc[truck_row])
x.append('0')
y.append('0')
z.append('0')
lenght.append('0,5')
width.append('0,5')
height.append('0,25')
path.append('0')
prop.append('InitialDesiredSpeed')
value.append(velocidad_maxima)
other.append('KilometersPerHour')
object_type.append('Camion')
name.append(df_trucks.ID.iloc[truck_row])
x.append('0')
y.append('0')
z.append('0')
lenght.append('0,5')
width.append('0,5')
height.append('0,25')
path.append('0')
prop.append('InitialNumberInSystem')
value.append('1')
other.append('none')
datos_dict = {
'Tipo objeto': object_type,
'Nombre': name,
'x':x,
'y':y,
'z':z,
'lenght': lenght,
'width': width,
'height': height,
'path': path,
'prop': prop,
'value': value,
'other': other
}
df_trucks_locations = pd.DataFrame(datos_dict)
df_trucks_locations.to_csv(path_or_buf='C:/Users/dazac/Downloads/datos mina/datos produccion/creador_camiones.csv', sep='#', header=False, index=False)
| scripts/xml_a_csv_camiones.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tabular ANN for Exploration of DEAP Dataset
# SVM for quadrants
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# + language="javascript"
# utils.load_extension('collapsible_headings/main')
# utils.load_extension('hide_input/main')
# utils.load_extension('execute_time/ExecuteTime')
# utils.load_extension('code_prettify/code_prettify')
# utils.load_extension('scroll_down/main')
# utils.load_extension('jupyter-js-widgets/extension')
# -
from fastai.tabular import *
from sklearn.model_selection import train_test_split
from sklearn import datasets, svm, metrics
PATH = "/media/tris/tris_files/EEG_datasets/DMD/tabular"
df_raw = pd.read_csv('/media/tris/tris_files/EEG_datasets/DMD/tabular/dmd_deap_100modes_trials2.csv')
df_raw
df_raw.iloc[[40],:]
os.makedirs('tmp', exist_ok=True)
df_raw.to_feather('tmp/eeg-raw')
import pandas as pd
df_raw = pd.read_feather('tmp/eeg-raw') #lol raw sashimis and sushis
df_raw.head()
df_raw.iloc[[500],:]
path = '/media/tris/tris_files/EEG_datasets/DEAP_data_preprocessed_python/s01.dat'
df_read = pickle.load(open(path, 'rb'), encoding='latin1')
labels=df_read['labels']
df_tmp = pd.DataFrame(labels, columns=['valence','arousal','dominance','liking'])
df=df_tmp
for n in range (2,10):
path = '/media/tris/tris_files/EEG_datasets/DEAP_data_preprocessed_python/s0'+str(n)+'.dat'
df_read = pickle.load(open(path, 'rb'), encoding='latin1')
labels=df_read['labels']
df_tmp = pd.DataFrame(labels, columns=['valence','arousal','dominance','liking'])
df=df.append(df_tmp, ignore_index=True)
for n in range (10,33):
path = '/media/tris/tris_files/EEG_datasets/DEAP_data_preprocessed_python/s'+str(n)+'.dat'
df_read = pickle.load(open(path, 'rb'), encoding='latin1')
labels=df_read['labels']
df_tmp = pd.DataFrame(labels, columns=['valence','arousal','dominance','liking'])
df=df.append(df_tmp, ignore_index=True)
df=df.div(9) # normalize the 1-9 ratings to a 0-1 scale
df.loc[(df['valence'] >= 0.5) & (df['arousal'] >= 0.5), 'emotion_quad'] = 'HVHA'
df.loc[(df['valence'] <= 0.5) & (df['arousal'] >= 0.5), 'emotion_quad'] = 'LVHA'
df.loc[(df['valence'] <= 0.5) & (df['arousal'] <= 0.5), 'emotion_quad'] = 'LVLA'
df.loc[(df['valence'] >= 0.5) & (df['arousal'] <= 0.5), 'emotion_quad'] = 'HVLA'
df.tail()
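# A quick sanity check (not required by the exercise) of how the trials distribute over the four quadrants:
df['emotion_quad'].value_counts()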
emotion_quad = df['emotion_quad']
# df_learn = df_raw.join(emotion_quad)
df_raw.head()
fig, axs = plt.subplots(1, 5, figsize=(15, 5))
axs[0].hist(df_raw.Real_Comp1_Mode1)
axs[0].set_title('Real_Comp1_Mode1')
axs[1].hist(df_raw.fn3)
axs[1].set_title('fn3')
axs[2].hist(df_raw.Imag_Comp1_Mode1)
axs[2].set_title('Imag_Comp1_Mode1')
axs[3].hist(df_raw.zeta49)
axs[3].set_title('zeta49')
axs[4].hist(df.emotion_quad)
axs[4].set_title('label')
X_train, X_test, y_train, y_test = train_test_split(
df_raw, emotion_quad, test_size=0.2, shuffle=True)
classifier = svm.SVC(gamma=0.2, kernel='poly', degree=5)
classifier.fit(X_train, y_train)
predicted = classifier.predict(X_test)
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(y_test, predicted)))
disp = metrics.plot_confusion_matrix(classifier, X_test, y_test)
disp.figure_.suptitle("Confusion Matrix")
print("Confusion matrix:\n%s" % disp.confusion_matrix)
| src/models/2020-07-27_DMD-SVM_01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import altair as alt
import os
os.chdir("/Volumes/UBC/Block5/551/Project_MDS/dashboard-project-cryptocurrency_db")
source1 = pd.read_csv("price.csv")
#source1 = source1[(source1.Name == 'bitcoin_cash')]
source1['New_date1']=pd.to_datetime(source1['New_date1'], format='%Y-%m-%d')
#source1.head()
#source1.info()
source1['Name'].unique()
# +
alt.data_transformers.disable_max_rows()
unique_cryptocurrency = source1['Name'].unique()
#data_start = source1['New_date'].min()
#data_end = source1['New_date'].max()
base_close = alt.Chart(source1).mark_bar(size=5).encode(
x=alt.X('New_date1', sort='x'),
y=alt.Y('Close'),
color=alt.value("#BABABA"),
opacity=alt.value(0.8),
tooltip=['Name', 'Close']
)
three_days_rolling_closemean_line = alt.Chart(source1).mark_line().encode(
x=alt.X('New_date1', sort='x'),
y=alt.Y('RollingAvg3_Close'),
color=alt.value("#FFB319"),
tooltip=['Name', 'RollingAvg3_Close']
)
seven_days_rolling_closemean_line = alt.Chart(source1).mark_line().encode(
x=alt.X('New_date1', sort='x'),
y=alt.Y('RollingAvg7_Close'),
color=alt.value("#FFDD99"),
tooltip=['Name', 'RollingAvg7_Close']
)
# A dropdown filter
cryptocurrency_dropdown = alt.binding_select(options=unique_cryptocurrency)
cryptocurrency_select = alt.selection_single(fields=[' '], bind=cryptocurrency_dropdown, name="Cryptocurrency")
# -
(base_close +
three_days_rolling_closemean_line +
seven_days_rolling_closemean_line ).configure_axis(
grid=False
).properties(
width=1400,
height=300
).add_selection(
cryptocurrency_select
).transform_filter(
cryptocurrency_select
).properties(title="Dropdown Filtering")
# +
# Filter needs to be fixed
# https://altair-viz.github.io/gallery/multiple_interactions.html
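# -
# A possible fix (a sketch, not verified here): bind the dropdown selection to the `Name` field so that choosing a cryptocurrency actually filters the data, then re-run the chart cell above.
cryptocurrency_select = alt.selection_single(fields=['Name'],
                                             bind=cryptocurrency_dropdown,
                                             name="Cryptocurrency")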
| reports/.ipynb_checkpoints/Graph1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # France's Effectiveness in Attack
# import relevant libraries
# %matplotlib inline
import json
import matplotlib
from pandas.io.json import json_normalize
import numpy as np
import seaborn as sns
import pandas as pd
import os
from pathlib import Path
import matplotlib.pyplot as plt
from matplotlib.patches import Arc, Rectangle, ConnectionPatch
from matplotlib.offsetbox import OffsetImage
import squarify
from functools import reduce
# ## Draw the pitch
def draw_pitch(ax):
# size of the pitch is 120, 80
#Create figure
#Pitch Outline & Centre Line
plt.plot([0,0],[0,80], color="black")
plt.plot([0,120],[80,80], color="black")
plt.plot([120,120],[80,0], color="black")
plt.plot([120,0],[0,0], color="black")
plt.plot([60,60],[0,80], color="black")
#Left Penalty Area
plt.plot([14.6,14.6],[57.8,22.2],color="black")
plt.plot([0,14.6],[57.8,57.8],color="black")
plt.plot([0,14.6],[22.2,22.2],color="black")
#Right Penalty Area
plt.plot([120,105.4],[57.8,57.8],color="black")
plt.plot([105.4,105.4],[57.8,22.5],color="black")
plt.plot([120, 105.4],[22.5,22.5],color="black")
#Left 6-yard Box
plt.plot([0,4.9],[48,48],color="black")
plt.plot([4.9,4.9],[48,32],color="black")
plt.plot([0,4.9],[32,32],color="black")
#Right 6-yard Box
plt.plot([120,115.1],[48,48],color="black")
plt.plot([115.1,115.1],[48,32],color="black")
plt.plot([120,115.1],[32,32],color="black")
#Prepare Circles
centreCircle = plt.Circle((60,40),8.1,color="black",fill=False)
centreSpot = plt.Circle((60,40),0.71,color="black")
leftPenSpot = plt.Circle((9.7,40),0.71,color="black")
rightPenSpot = plt.Circle((110.3,40),0.71,color="black")
#Draw Circles
ax.add_patch(centreCircle)
ax.add_patch(centreSpot)
ax.add_patch(leftPenSpot)
ax.add_patch(rightPenSpot)
#Prepare Arcs
# arguments for arc
# x, y coordinate of centerpoint of arc
# width, height as arc might not be circle, but oval
# angle: degree of rotation of the shape, anti-clockwise
# theta1, theta2, start and end location of arc in degree
leftArc = Arc((9.7,40),height=16.2,width=16.2,angle=0,theta1=310,theta2=50,color="black")
rightArc = Arc((110.3,40),height=16.2,width=16.2,angle=0,theta1=130,theta2=230,color="black")
#Draw Arcs
ax.add_patch(leftArc)
ax.add_patch(rightArc)
# ## Plotting all shots
# +
data_id = [7546, 7563, 8655, 8658, 7530, 7580, 8649]
directory = '/Users/steven/Documents/Developer/data_science/football/data/raw/'
# sequentially read each json file and concatenate into a pre-defined dataframe
all_france = pd.DataFrame()
for i in data_id:
with open(directory + str(i) + '.json') as data_file:
data = json.load(data_file)
df = json_normalize(data, sep = '_')
if all_france.empty:
all_france = df
else:
all_france = pd.concat([all_france, df], join = 'outer', sort = False)
# +
shot_data = all_france[(all_france['type_name'] == "Shot") & (all_france['team_name'] == 'France')]
fig=plt.figure()
fig.set_size_inches(7, 5)
ax=fig.add_subplot(1,1,1)
draw_pitch(ax)
plt.axis('off')
for i in range(len(shot_data)):
# can also differentiate different half by different color
color = "red" if shot_data.iloc[i]['shot_outcome_name'] == "Goal" else "black"
ax.annotate("", xy = (shot_data.iloc[i]['shot_end_location'][0], shot_data.iloc[i]['shot_end_location'][1]), xycoords = 'data',
xytext = (shot_data.iloc[i]['location'][0], shot_data.iloc[i]['location'][1]), textcoords = 'data',
arrowprops=dict(arrowstyle="->",connectionstyle="arc3", color = color),)
plt.ylim(0, 80)
plt.xlim(0, 120)
plt.show()
# -
# ### The plot is not terribly useful, but we can see that a majority of the French goals came from inside the 6-yard box. Even though they attempted a lot of long-range efforts, only three found the back of the net, including the stunner by Pavard in the round of 16 against Argentina.
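# To attach a rough number to that observation, here is a quick check added for illustration; it reuses `shot_data` and the right 6-yard-box coordinates from `draw_pitch` (x >= 115.1, 32 <= y <= 48).
# +
goals = shot_data[shot_data['shot_outcome_name'] == 'Goal']
in_six_yard_box = goals['location'].apply(lambda loc: loc[0] >= 115.1 and 32 <= loc[1] <= 48)
print("{} of {} goals were scored from inside the 6-yard box, out of {} shots in total".format(
    in_six_yard_box.sum(), len(goals), len(shot_data)))
# -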
def draw_half_pitch(ax):
# focus on only half of the pitch
#Pitch Outline & Centre Line
Pitch = Rectangle([60,0], width = 60, height = 80, fill = False)
#Right Penalty Area
RightPenalty = Rectangle([105.4,22.3], width = 14.6, height = 35.3, fill = False)
#Right 6-yard Box
RightSixYard = Rectangle([115.1,32], width = 4.9, height = 16, fill = False)
#Prepare Circles
centreCircle = Arc((60,40),width = 8.1, height = 8.1, angle=0,theta1=270,theta2=90,color="black")
centreSpot = plt.Circle((60,40),0.71,color="black")
rightPenSpot = plt.Circle((110.3,40),0.71,color="black")
rightArc = Arc((110.3,40),height=16.2,width=16.2,angle=0,theta1=130,theta2=230,color="black")
element = [Pitch, RightPenalty, RightSixYard, centreCircle, centreSpot, rightPenSpot, rightArc]
for i in element:
ax.add_patch(i)
# +
fig=plt.figure()
fig.set_size_inches(7, 5)
ax=fig.add_subplot(1,1,1)
draw_half_pitch(ax)
plt.axis('off')
# draw the scatter plot for goals
x_coord_goal = [location[0] for i, location in enumerate(shot_data["location"]) if shot_data.iloc[i]['shot_outcome_name'] == "Goal"]
y_coord_goal = [location[1] for i, location in enumerate(shot_data["location"]) if shot_data.iloc[i]['shot_outcome_name'] == "Goal"]
# shots that end up with no goal
x_coord = [location[0] for i, location in enumerate(shot_data["location"]) if shot_data.iloc[i]['shot_outcome_name'] != "Goal"]
y_coord = [location[1] for i, location in enumerate(shot_data["location"]) if shot_data.iloc[i]['shot_outcome_name'] != "Goal"]
# put the two scatter plots on to the pitch
ax.scatter(x_coord_goal, y_coord_goal, c = 'red', label = 'goal')
ax.scatter(x_coord, y_coord, c = 'blue', label = 'shots')
plt.ylim(-.5, 80)
plt.xlim(-.5, 130)
plt.legend(loc = 'best')
plt.axis('off')
plt.show()
# +
# we use a joint plot to see the density of the shot distribution across the 2 axes of the pitch
joint_shot_chart = sns.jointplot(x_coord, y_coord, stat_func=None,
kind='scatter', space=0, alpha=0.5)
joint_shot_chart.fig.set_size_inches(7,5)
ax = joint_shot_chart.ax_joint
# overlaying the plot with a pitch
draw_half_pitch(ax)
ax.set_xlim(0.5,120.5)
ax.set_ylim(0.5,80.5)
# draw the scatter plot for goals
x_coord_goal = [location[0] for i, location in enumerate(shot_data["location"]) if shot_data.iloc[i]['shot_outcome_name'] == "Goal"]
y_coord_goal = [location[1] for i, location in enumerate(shot_data["location"]) if shot_data.iloc[i]['shot_outcome_name'] == "Goal"]
# shots that end up with no goal
x_coord = [location[0] for i, location in enumerate(shot_data["location"]) if shot_data.iloc[i]['shot_outcome_name'] != "Goal"]
y_coord = [location[1] for i, location in enumerate(shot_data["location"]) if shot_data.iloc[i]['shot_outcome_name'] != "Goal"]
# put the two scatter plots on to the pitch
ax.scatter(x_coord, y_coord, c = 'b', label = 'shots')
ax.scatter(x_coord_goal, y_coord_goal, c = 'r', label = 'goal')
# Get rid of axis labels and tick marks
ax.set_xlabel('')
ax.set_ylabel('')
joint_shot_chart.ax_marg_x.set_axis_off()
ax.set_axis_off()
plt.ylim(-.5, 80)
plt.axis('off')
plt.show()
| code/france_attack.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # GPU-accelerated interactive visualization of single cells with RAPIDS, Scanpy and Plotly Dash
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
# In this notebook, we cluster cells based on a single-cell RNA-seq count matrix, and produce an interactive visualization of the clustered cells that allows for further analysis of the data in a browser window.
# For demonstration purposes, we use a dataset of ~70,000 human lung cells from Travaglini et al. 2020 (https://www.biorxiv.org/content/10.1101/742320v2) and label cells using the ACE2, TMPRSS2, and EPCAM marker genes. See the README for instructions to download this dataset.
# ## Import requirements
# +
import numpy as np
import scanpy as sc
import anndata
import sys
import time
import cudf
import cupy as cp
from cuml.decomposition import PCA
from cuml.manifold import TSNE
from cuml.cluster import KMeans
import rapids_scanpy_funcs
import warnings
warnings.filterwarnings('ignore', 'Expected ')
# -
# We use the RAPIDS memory manager on the GPU to control how memory is allocated.
# +
import rmm
rmm.reinitialize(
managed_memory=True, # Allows oversubscription
pool_allocator=False, # default is False
devices=0, # GPU device IDs to register. By default registers only GPU 0.
)
cp.cuda.set_allocator(rmm.rmm_cupy_allocator)
# -
# ## Input data
# In the cell below, we provide the path to the `.h5ad` file containing the count matrix to analyze. Please see the README for instructions on how to download the dataset we use here.
#
# We recommend saving count matrices in the sparse .h5ad format as it is much faster to load than a dense CSV file. To run this notebook using your own dataset, please see the README for instructions to convert your own count matrix into this format. Then, replace the path in the cell below with the path to your generated `.h5ad` file.
input_file = "../data/krasnow_hlca_10x_UMIs.sparse.h5ad"
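# As a rough sketch (the README may describe a different or more complete procedure), converting a dense CSV count matrix into the sparse `.h5ad` format used above might look like the following, where `my_counts.csv` is a hypothetical file path:
#
# ```python
# import scanpy as sc
# import scipy.sparse
#
# adata_csv = sc.read_csv("my_counts.csv")            # orientation (cells x genes or genes x cells) depends on your file
# adata_csv.X = scipy.sparse.csr_matrix(adata_csv.X)  # store the counts sparsely
# adata_csv.write("my_counts.sparse.h5ad")
# ```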
# ## Set parameters
# +
# marker genes
RIBO_GENE_PREFIX = "RPS" # Prefix for ribosomal genes to regress out
markers = ["ACE2", "TMPRSS2", "EPCAM"] # Marker genes for visualization
# filtering cells
min_genes_per_cell = 200 # Filter out cells with fewer genes than this expressed
max_genes_per_cell = 6000 # Filter out cells with more genes than this expressed
# filtering genes
n_top_genes = 5000 # Number of highly variable genes to retain
# PCA
n_components = 50 # Number of principal components to compute
# KNN
n_neighbors = 15 # Number of nearest neighbors for KNN graph
knn_n_pcs = 50 # Number of principal components to use for finding nearest neighbors
# UMAP
umap_min_dist = 0.3
umap_spread = 1.0
# -
# ## Load and Prepare Data
# We load the sparse count matrix from an `h5ad` file using Scanpy. The sparse count matrix will then be placed on the GPU.
# %%time
adata = sc.read(input_file)
adata = adata.T
adata.shape
# %%time
genes = cudf.Series(adata.var_names)
barcodes = cudf.Series(adata.obs_names)
sparse_gpu_array = cp.sparse.csr_matrix(adata.X)
# ## Preprocessing
# ### Filter
# We filter the count matrix to remove cells with an extreme number of genes expressed.
# %%time
sparse_gpu_array, barcodes = rapids_scanpy_funcs.filter_cells(sparse_gpu_array, min_genes=min_genes_per_cell, max_genes=max_genes_per_cell, barcodes=barcodes)
# Some genes will now have zero expression in all cells. We filter out such genes.
# %%time
sparse_gpu_array, genes = rapids_scanpy_funcs.filter_genes(sparse_gpu_array, genes, min_cells=1)
# The size of our count matrix is now reduced.
sparse_gpu_array.shape
# ### Normalize
# We normalize the count matrix so that the total counts in each cell sum to 1e4.
# %%time
sparse_gpu_array = rapids_scanpy_funcs.normalize_total(sparse_gpu_array, target_sum=1e4)
# Next, we log transform the count matrix.
# %%time
sparse_gpu_array = sparse_gpu_array.log1p()
# ### Select Most Variable Genes
# We convert the count matrix to an annData object.
# %%time
adata = anndata.AnnData(sparse_gpu_array.get())
adata.var_names = genes.to_pandas()
# Using scanpy, we filter the count matrix to retain only the 5000 most variable genes.
# %%time
sc.pp.highly_variable_genes(adata, n_top_genes=n_top_genes, flavor="cell_ranger")
adata = adata[:, adata.var.highly_variable]
# ### Regress out confounding factors (number of counts, ribosomal gene expression)
# We can now perform regression on the count matrix to correct for confounding factors - for example purposes, we use the number of counts and the expression of ribosomal genes. Many workflows use the expression of mitochondrial genes (named starting with `MT-`).
#
# Before regression, we save the 'raw' expression values of the ACE2 and TMPRSS2 genes to use for labeling cells afterward. We will also store the expression of an epithelial marker gene (EPCAM).
# +
# %%time
tmp_norm = sparse_gpu_array.tocsc()
raw_marker_expressions = {}
for marker in markers:
raw_marker_expressions[marker] = tmp_norm[:, genes[genes == marker].index[0]].todense().ravel()
del tmp_norm
# -
# We now calculate the total counts and the percentage of ribosomal counts for each cell.
# +
# %%time
genes = adata.var_names
ribo_genes = adata.var_names.str.startswith(RIBO_GENE_PREFIX)
n_counts = adata.X.sum(axis=1)
percent_ribo = (adata.X[:,ribo_genes].sum(axis=1) / n_counts).ravel()
n_counts = cp.array(n_counts).ravel()
percent_ribo = cp.array(percent_ribo).ravel()
# -
# And perform regression:
# %%time
sparse_gpu_array = cp.sparse.csc_matrix(adata.X)
sparse_gpu_array = rapids_scanpy_funcs.regress_out(sparse_gpu_array, n_counts, percent_ribo)
# ### Scale
# Finally, we scale the count matrix to obtain a z-score and apply a cutoff value of 10 standard deviations, obtaining the preprocessed count matrix.
# %%time
sparse_gpu_array = rapids_scanpy_funcs.scale(sparse_gpu_array, max_value=10)
# ## Cluster & Visualize
# We store the preprocessed count matrix as an AnnData object, which is currently in host memory.
# We also add the barcodes of the filtered cells, and the expression levels of the marker genes, to the annData object.
# +
# %%time
adata = anndata.AnnData(sparse_gpu_array.get())
adata.var_names = genes
adata.obs_names = barcodes.to_pandas()
for marker in markers:
adata.obs[marker + "_raw"] = raw_marker_expressions[marker].get()
# -
# ### Reduce
# We use PCA to reduce the dimensionality of the matrix to its top 50 principal components.
# %%time
adata.obsm["X_pca"] = PCA(n_components=n_components, output_type="numpy").fit_transform(adata.X)
# ### UMAP + Louvain
# We visualize the cells using the UMAP algorithm in Rapids. Before UMAP, we need to construct a k-nearest neighbors graph in which each cell is connected to its nearest neighbors. This can be done conveniently using rapids functionality already integrated into Scanpy.
#
# Note that Scanpy uses an approximation to the nearest neighbors on the CPU while the GPU version performs an exact search. While both methods are known to yield useful results, some differences in the resulting visualization and clusters can be observed.
# %%time
sc.pp.neighbors(adata, n_neighbors=n_neighbors, n_pcs=knn_n_pcs, method='rapids')
# The UMAP function from Rapids is also integrated into Scanpy.
# %%time
sc.tl.umap(adata, min_dist=umap_min_dist, spread=umap_spread, method='rapids')
# Finally, we use the Louvain algorithm for graph-based clustering, once again using the `rapids` option in Scanpy.
# %%time
sc.tl.louvain(adata, flavor='rapids')
# We plot the cells using the UMAP visualization, using the Louvain clusters as labels.
sc.pl.umap(adata, color=["louvain"])
# ## Defining re-clustering function for interactive visualization
# As we have shown above, the speed of RAPIDS allows us to run steps like dimension reduction, clustering and visualization in seconds or even less. In the sections below, we create an interactive visualization that takes advantage of this speed by allowing users to cluster and analyze selected groups of cells at the click of a button.
# First, we create a function named `re_cluster`. This function can be called on selected groups of cells. According to the function defined below, PCA, KNN, UMAP and Louvain clustering will be re-computed upon the selected cells. You can customize this function for your desired analysis.
def re_cluster(adata):
#### Function to repeat clustering and visualization on subsets of cells
#### Runs PCA, KNN, UMAP and Louvain clustering on selected cells.
adata.obsm["X_pca"] = PCA(n_components=n_components).fit_transform(adata.X).get()
sc.pp.neighbors(adata, n_neighbors=n_neighbors, n_pcs=knn_n_pcs, method='rapids')
sc.tl.umap(adata, min_dist=umap_min_dist, spread=umap_spread, method='rapids')
sc.tl.louvain(adata, flavor='rapids')
return adata
# ## Creating an interactive visualization with Plotly Dash
# <img src="https://github.com/avantikalal/rapids-single-cell-examples/blob/visualization/images/dashboard.png?raw=true" alt="Interactive Dashboard" width="400"/>
# Below, we create the interactive visualization using the `adata` object and the re-clustering function defined above. To learn more about how this visualization is built, see `visualize.py`.
# When you run the cell below, it returns a link. Click on this link to access the interactive visualization within your browser.
#
# Once opened, click the `Directions` button for instructions.
# +
import visualize
import importlib
importlib.reload(visualize)
v = visualize.Visualization(adata, markers, re_cluster_callback=re_cluster)
v.start('10.33.227.161')
selected_cells = v.new_df
# -
# Within the dashboard, you can select cells using a variety of methods. You can then cluster, visualize and analyze the selected cells using the tools provided. Click on the `Directions` button for details.
#
# To export the selected cells and the results of your analysis back to the notebook, click the `Export to Dataframe` button. This exports the results of your analysis back to this notebook, and closes the interactive dashboard.
#
# See the next section for instructions on how to use the exported data.
# ## Exporting a selection of cells from the dashboard
# If you exported a selection of cells from the interactive visualization, your selection will be available here as a data frame named `selected_cells`. The `labels` column of this dataframe contains the newly generated cluster labels assigned to these selected cells.
print(selected_cells.shape)
selected_cells.head()
# You can link the selected cells to the original `adata` object using the cell barcodes.
adata_selected_cells = adata[selected_cells.barcode.to_array(),:]
adata_selected_cells
| notebooks/hlca_lung_gpu_analysis-visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1>
# <center>Problem 4</center>
# </h1>
#
# <center><a href="https://github.com/Luis2501"><NAME></a></center>
#
# <br>
#
# <center>Facultad de Ciencias Físico Matemáticas, Universidad Autonoma de Coahuila</center>
#
# <br>
#
# <center><b>E-mail:</b> <EMAIL></center>
# __________________
# The charge, obtained from the radial charge density $\sigma$ over the cross-sectional area of the cylinder, is defined by:
#
# $$q_{total}=\int_A \sigma(r)dA(r)$$
#
# where $dA$ is the differential of area of the cylinder's base:
#
# $$q_{total}=\int_A \sigma(r)(2\pi r dr)$$
#
# We know that the density has the form:
#
# $$\sigma(r)=\sigma_0 \left(1-\frac{r^2}{a^2} \right)$$
#
# where
#
# $$\sigma_0 =1.3 \times 10^{-6} C/m^2 , \; \; a = 1 \times 10^{-3} \; m$$
#
# And the electric field is:
#
# $$\vec{E}=\frac{kq_{total}}{d^2}\hat r$$
#
# $d$: distance from the surface to the point in space where the electric field is evaluated ($d\gt r$).
#
#
# a) Determine the charge distribution and the electric field (at distances $d \in \{ r+r/4,r+r/2\}$) as a function of the radius, using 3 integration methods.
#
# b) Obtain the analytical solution, compare the errors of the different methods and detail the analysis.
# ## Solution
# ____________
# To begin, we import the required libraries.
# +
import plotly.graph_objects as go
import numpy as np
import sys
sys.path.append("../")
from PhysicsPy.Integration import *
# -
# We import the `Integration` module from `PhysicsPy`, which contains the integration methods we will use.
#
# Now we will create a `Carga` (charge) class that lets us build cylinders with different characteristics, so that we can compute their charge.
class Carga:
def __init__(self, sigma_0, a):
self.sigma_0, self.a = sigma_0, a
def __call__(self, r):
sigma_0, a = self.sigma_0, self.a
return np.array(sigma_0*(1 - (r**2/a**2))*2*np.pi*r)
# We create the `Cilindro` (cylinder) with the conditions imposed by the problem.
#
# Using the `Integration` module, we iterate over each method to obtain the different solutions.
# +
Cilindro = Carga(1.3e-6, 1e-3)
Methods = [Riemann, Trapeze, Midpoint, Simpson1_3, Simpson3_8]
Names = ["Riemman", "Trapeze", "Midpoint", "Simpson1_3", "Simpson3_8"]
Solutions = []
for class_name, name in zip(Methods, Names):
Solucion = class_name(Cilindro)
Solucion.Limits(0, Cilindro.a, 1e-7)
Integral = Solucion.Solve()
Solutions.append(Integral)
print(name + " Integration: ", Integral)
del Integral, Solucion
Solutions = np.array(Solutions)
# -
# The following results are obtained:
#
# ``` terminal
# Riemman Integration: 2.0420352044130184e-12
# Trapeze Integration: 2.0420352044130184e-12
# Midpoint Integration: 2.042035235043548e-12
# Simpson1_3 Integration: 2.042035224833363e-12
# Simpson3_8 Integration: 2.042035204411995e-12
# ```
#
# Something important to consider is how long each method takes, since Riemann integration becomes much slower as we decrease the step size $h$.
# From the given integral, we have
#
# $$q_{total} = \int_{0} ^{a} \sigma_0 \left( 1 - \frac{r^2}{a^2} \right) 2 \pi r \; dr$$
#
# Then
#
# $$q_{total} = \sigma_0 \int_{0} ^{a} \left( 2 \pi r - \frac{2 \pi r^3}{a^2} \right) \; dr $$
#
# $$q_{total} = \sigma_0 \left[ \pi r^2 - \frac{\pi r^4}{2 a^2} \right]_{0} ^{a}$$
#
# Therefore, the total charge is
#
# $$q_{total} = \sigma_{0} \left( \pi a^2 - \frac{\pi a^{2}}{2} \right) = \frac{\sigma_{0} \pi a^2}{2}$$
#
# Substituting $a$ and $\sigma_0$, we obtain the following total charge.
# +
Q_total = (Cilindro.sigma_0*np.pi*(Cilindro.a**2))/2
print("Solución analítica: ", Q_total)
# -
# In this way we can compare the errors.
# +
# Absolute error and relative error
e_abs = Q_total - Solutions
e_r = (Q_total - Solutions)/Q_total
print("Método \t \t", "Error absoluto \t\t", "Error relativo \t\t", "Error Porcentual \n")
for i in range(len(Solutions)):
print(Names[i], "\t", e_abs[i], "\t", e_r[i], "\t ", e_r[i]*100, "%")
# -
# <div style="text-alig: justify">
# Podemos observar que el error en todos los métdos es muy pequeño. Sin embargo, debemos destacar que considerando la magnitud de las cantidades utilizadas, el método de Simpson $1/3$ arrojo una mejor aproximación.
# </div>
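# As a small complement (a sketch, taking $r = a$ and the Coulomb constant $k \approx 8.99 \times 10^{9} \; \mathrm{N\,m^2/C^2}$), the electric field formula from the problem statement can be evaluated at the two requested distances using the numerical charge obtained above:
# +
k = 8.99e9  # Coulomb constant, N m^2 / C^2
for d in (Cilindro.a + Cilindro.a/4, Cilindro.a + Cilindro.a/2):
    E = k*Q_total/d**2
    print("d = {:.2e} m -> E = {:.4e} N/C".format(d, E))
# -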
| Problema 4/Problema 4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.12 64-bit (''torchenv'': conda)'
# metadata:
# interpreter:
# hash: 287fbb96b1d893c9729a73106bcc19570578fba614e979236aebdd25c879c138
# name: python3
# ---
# > This is a self-correcting activity generated by [nbgrader](https://nbgrader.readthedocs.io). Fill in any place that says `YOUR CODE HERE` or `YOUR ANSWER HERE`. Run subsequent cells to check your code.
# ---
# # Generate handwritten digits with a VAE (PyTorch)
#
# The goal here is to train a VAE to generate handwritten digits.
#
# ![VAE digits](images/vae_digits.png)
# ## Environment setup
# +
import os
import math
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# -
# Setup plots
# %matplotlib inline
plt.rcParams['figure.figsize'] = 10, 8
# %config InlineBackend.figure_format = 'retina'
# +
# Import ML packages (edit this list if needed)
import torch
print(f'PyTorch version: {torch.__version__}')
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import transforms
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# -
# ## Data loading
# Load MNIST dataset
trainset = torchvision.datasets.MNIST(
root="./data", train=True, transform=transforms.ToTensor(), download=True
)
testset = torchvision.datasets.MNIST(
root="./data", train=False, transform=transforms.ToTensor(), download=True
)
# ### Question
#
# Create batch data loaders `trainloader` and `testloader` resp. for training and test datasets.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "3af1c62cd8f2c6b61d53831111324fed", "grade": true, "grade_id": "cell-7ee6e64de897e788", "locked": false, "points": 1, "schema_version": 3, "solution": true, "task": false}
batch_size = 128
# YOUR CODE HERE
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=batch_size,
shuffle=True)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=batch_size,
                                         shuffle=True)
# -
# ## Model definition
#
# ### Question
#
# Complete the following class to create a variational autoencoder.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "6a41ad65b45e378060e1180f987522c1", "grade": true, "grade_id": "cell-aeffbaacaac863dc", "locked": false, "points": 2, "schema_version": 3, "solution": true, "task": false}
# VAE model
class VAE(nn.Module):
def __init__(self, input_dim=784, hidden_dim=400, latent_dim=20):
super(VAE, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, latent_dim)
self.fc3 = nn.Linear(hidden_dim, latent_dim)
self.fc4 = nn.Linear(latent_dim, hidden_dim)
self.fc5 = nn.Linear(hidden_dim, input_dim)
def encode(self, x):
"""Encode input into its latent representation
Returns mean and standard deviation"""
h = F.relu(self.fc1(x))
return self.fc2(h), self.fc3(h)
def sample(self, mu, log_var):
"""Sample a random codings vector from a gaussian distribution
Takes mean and log_var (gamma) as parameters"""
std = torch.exp(log_var/2)
eps = torch.randn_like(std)
return mu + eps * std
def decode(self, z):
"""Decode codings"""
h = F.relu(self.fc4(z))
return torch.sigmoid(self.fc5(h))
def forward(self, x):
"""Encode inputs to obtain mean and standard deviation
Sample codings from gaussian distribution using mean and std
Returns decoded codings, mean and standard deviation"""
# YOUR CODE HERE
mu, log_var = self.encode(x)
z = self.sample(mu, log_var)
return self.decode(z), mu, log_var
# -
# ## Model training
#
# ### Question
#
# Complete the following training loop to:
# - instantiate the variational autoencoder on target device.
# - instanciate the Adam optimizer.
# - implement forward pass and gradient descent.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "1aa733bf1670de1f6f642ffc6c3dcfdf", "grade": true, "grade_id": "cell-11f1ec283e7cd535", "locked": false, "points": 2, "schema_version": 3, "solution": true, "task": false}
input_dim = 784
hidden_dim = 400
latent_dim = 20
num_epochs = 15
learning_rate = 1e-3
prints_per_epoch = 1 # Increase to see more feedback during training
# Instanciate VAE and optimizer
# YOUR CODE HERE
model = VAE(input_dim=input_dim, hidden_dim=hidden_dim, latent_dim=latent_dim).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train model
for epoch in range(num_epochs):
for i, (x, _) in enumerate(trainloader):
# zero the parameter gradients
optimizer.zero_grad()
# Forward pass
# YOUR CODE HERE
x = x.to(device)
x = torch.flatten(x, start_dim=1)
x_reconst, mu, log_var = model(x)
# Compute reconstruction loss and KL divergence
reconst_loss = F.binary_cross_entropy(x_reconst, x, reduction="sum")
kl_div = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
loss = reconst_loss + kl_div
# Backprop and optimize
# YOUR CODE HERE
loss.backward()
optimizer.step()
# Print losses at regular intervals
step_count = len(trainloader)
print_threshold = math.ceil(step_count / prints_per_epoch)
if (i + 1) % print_threshold == 0 or (i + 1) == step_count:
print(
f"Epoch [{epoch + 1}/{num_epochs}]"
f", step [{i + 1}/{step_count}]"
f", reconst loss: {reconst_loss.item():.4f}"
f", KL div: {kl_div.item():.4f}"
)
# -
# ## Reconstructions visualization
# +
def plot_image(image):
plt.imshow(image.cpu().numpy().squeeze(), cmap="binary")
plt.axis("off")
def show_reconstructions(model, images, n_images=8):
"""Show original and reconstructed images side-by-side"""
inputs = images.reshape(-1, 28*28).to(device)
reconstructions, _, _ = model(inputs)
fig = plt.figure(figsize=(n_images * 1.5, 3))
for image_index in range(n_images):
plt.subplot(2, n_images, 1 + image_index)
plot_image(images[image_index])
plt.subplot(2, n_images, 1 + n_images + image_index)
plot_image(reconstructions[image_index].view(1, 28, 28))
# -
# ### Question
#
# Show reconstructions for one batch of test data.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "010911be05113bb3a62264e3800a0e1f", "grade": true, "grade_id": "cell-6927b3114069f46f", "locked": false, "points": 1, "schema_version": 3, "solution": true, "task": false}
# YOUR CODE HERE
images, _ = next(iter(testloader))
with torch.no_grad():
show_reconstructions(model, images, n_images=8)
# -
# ## Generating new images
def plot_multiple_images(images, n_cols=None):
"""Show a series of images"""
n_cols = n_cols or len(images)
n_rows = (len(images) - 1) // n_cols + 1
if images.shape[-1] == 1:
images = np.squeeze(images, axis=-1)
plt.figure(figsize=(n_cols * 1.5, 3))
for index, image in enumerate(images):
plt.subplot(n_rows, n_cols, index + 1)
plt.imshow(image.cpu().numpy().squeeze(), cmap="binary")
plt.axis("off")
# ### Question
#
# Use the VAE to show several generated digits.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "bb4b694aae1da7ed57a3c2d7ba1a3134", "grade": true, "grade_id": "cell-58f40750d5551290", "locked": false, "points": 1, "schema_version": 3, "solution": true, "task": false}
with torch.no_grad():
z = torch.randn(16, latent_dim).to(device)
# YOUR CODE HERE
images = model.decode(z)
images = images.view(-1, 1, 28, 28)
plot_multiple_images(images, n_cols=16)
# -
| mnist_vae_pytorch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Linear Regression with PyTorch
#
# #### Part 2 of "PyTorch: Zero to GANs"
#
# *This post is the second in a series of tutorials on building deep learning models with PyTorch, an open source neural networks library developed and maintained by Facebook. Check out the full series:*
#
# 1. [PyTorch Basics: Tensors & Gradients](https://jovian.ml/aakashns/01-pytorch-basics)
# 2. [Linear Regression & Gradient Descent](https://jovian.ml/aakashns/02-linear-regression)
# 3. [Image Classfication using Logistic Regression](https://jovian.ml/aakashns/03-logistic-regression)
# 4. [Training Deep Neural Networks on a GPU](https://jovian.ml/aakashns/04-feedforward-nn)
# 5. Convolutional Neural Networks, Regularization and ResNets (coming soon..)
# 6. Generative Adverserial Networks (coming soon..)
#
#
# <div height="315">
# <iframe width="560" height="315" src="https://www.youtube.com/embed/gERrXAk9h_A" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
# </div>
#
# Continuing where the [previous tutorial](https://jvn.io/aakashns/3143ceb92b4f4cbbb4f30e203580b77b) left off, we'll discuss one of the foundational algorithms of machine learning in this post: *Linear regression*. We'll create a model that predicts crop yields for apples and oranges (*target variables*) by looking at the average temperature, rainfall and humidity (*input variables or features*) in a region. Here's the training data:
#
# ![linear-regression-training-data](https://i.imgur.com/6Ujttb4.png)
#
# In a linear regression model, each target variable is estimated to be a weighted sum of the input variables, offset by some constant, known as a bias :
#
# ```
# yield_apple = w11 * temp + w12 * rainfall + w13 * humidity + b1
# yield_orange = w21 * temp + w22 * rainfall + w23 * humidity + b2
# ```
#
# Visually, it means that the yield of apples is a linear or planar function of temperature, rainfall and humidity:
#
# ![linear-regression-graph](https://i.imgur.com/4DJ9f8X.png)
#
# The *learning* part of linear regression is to figure out a set of weights `w11, w12,... w23, b1 & b2` by looking at the training data, to make accurate predictions for new data (i.e. to predict the yields for apples and oranges in a new region using the average temperature, rainfall and humidity). This is done by adjusting the weights slightly many times to make better predictions, using an optimization technique called *gradient descent*.
# ## System setup
#
# If you want to follow along and run the code as you read, you can run this notebook by clicking the 'Run' button at the top of this page. You can also clone this notebook hosted on [Jovian.ml](https://www.jovian.ml), install the required dependencies, and start Jupyter by running the following commands on the terminal:
#
# ```bash
# pip install jovian --upgrade # Install the jovian library
# jovian clone aakashns/02-linear-regression # Download notebook & dependencies
# # cd 02-linear-regression # Enter the created directory
# jovian install # Install the dependencies
# conda activate 02-linear-regression # Activate virtual environment
# jupyter notebook # Start Jupyter
# ```
#
# On older versions of conda, you might need to run `source activate 02-linear-regression` to activate the environment. For a more detailed explanation of the above steps, check out the *System setup* section in the [previous notebook](https://jovian.ml/aakashns/01-pytorch-basics).
# We begin by importing Numpy and PyTorch:
import numpy as np
import torch
# ## Training data
#
# The training data can be represented using 2 matrices: `inputs` and `targets`, each with one row per observation, and one column per variable.
# Input (temp, rainfall, humidity)
inputs = np.array([[73, 67, 43],
[91, 88, 64],
[87, 134, 58],
[102, 43, 37],
[69, 96, 70]], dtype='float32')
# Targets (apples, oranges)
targets = np.array([[56, 70],
[81, 101],
[119, 133],
[22, 37],
[103, 119]], dtype='float32')
# We've separated the input and target variables, because we'll operate on them separately. Also, we've created numpy arrays, because this is typically how you would work with training data: read some CSV files as numpy arrays, do some processing, and then convert them to PyTorch tensors as follows:
# Convert inputs and targets to tensors
inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)
print(inputs)
print(targets)
# ## Linear regression model from scratch
#
# The weights and biases (`w11, w12,... w23, b1 & b2`) can also be represented as matrices, initialized as random values. The first row of `w` and the first element of `b` are used to predict the first target variable i.e. yield of apples, and similarly the second for oranges.
# Weights and biases
w = torch.randn(2, 3, requires_grad=True)
b = torch.randn(2, requires_grad=True)
print(w)
print(b)
# `torch.randn` creates a tensor with the given shape, with elements picked randomly from a [normal distribution](https://en.wikipedia.org/wiki/Normal_distribution) with mean 0 and standard deviation 1.
#
# Our *model* is simply a function that performs a matrix multiplication of the `inputs` and the weights `w` (transposed) and adds the bias `b` (replicated for each observation).
#
# ![matrix-mult](https://i.imgur.com/WGXLFvA.png)
#
# We can define the model as follows:
def model(x):
return x @ w.t() + b
# `@` represents matrix multiplication in PyTorch, and the `.t` method returns the transpose of a tensor.
#
# The matrix obtained by passing the input data into the model is a set of predictions for the target variables.
# Generate predictions
preds = model(inputs)
print(preds)
# Let's compare the predictions of our model with the actual targets.
# Compare with targets
print(targets)
# You can see that there's a huge difference between the predictions of our model, and the actual values of the target variables. Obviously, this is because we've initialized our model with random weights and biases, and we can't expect it to *just work*.
# ## Loss function
#
# Before we improve our model, we need a way to evaluate how well our model is performing. We can compare the model's predictions with the actual targets, using the following method:
#
# * Calculate the difference between the two matrices (`preds` and `targets`).
# * Square all elements of the difference matrix to remove negative values.
# * Calculate the average of the elements in the resulting matrix.
#
# The result is a single number, known as the **mean squared error** (MSE).
# MSE loss
def mse(t1, t2):
diff = t1 - t2
return torch.sum(diff * diff) / diff.numel()
# `torch.sum` returns the sum of all the elements in a tensor, and the `.numel` method returns the number of elements in a tensor. Let's compute the mean squared error for the current predictions of our model.
# Compute loss
loss = mse(preds, targets)
print(loss)
# Here’s how we can interpret the result: *On average, each element in the prediction differs from the actual target by about 138 (square root of the loss 19044)*. And that’s pretty bad, considering the numbers we are trying to predict are themselves in the range 50–200. Also, the result is called the *loss*, because it indicates how bad the model is at predicting the target variables. Lower the loss, better the model.
# ## Compute gradients
#
# With PyTorch, we can automatically compute the gradient or derivative of the loss w.r.t. to the weights and biases, because they have `requires_grad` set to `True`.
# Compute gradients
loss.backward()
# The gradients are stored in the `.grad` property of the respective tensors. Note that the derivative of the loss w.r.t. the weights matrix is itself a matrix, with the same dimensions.
# Gradients for weights
print(w)
print(w.grad)
# The loss is a [quadratic function](https://en.wikipedia.org/wiki/Quadratic_function) of our weights and biases, and our objective is to find the set of weights where the loss is the lowest. If we plot a graph of the loss w.r.t any individual weight or bias element, it will look like the figure shown below. A key insight from calculus is that the gradient indicates the rate of change of the loss, or the [slope](https://en.wikipedia.org/wiki/Slope) of the loss function w.r.t. the weights and biases.
#
# If a gradient element is **positive**:
# * **increasing** the element's value slightly will **increase** the loss.
# * **decreasing** the element's value slightly will **decrease** the loss
#
# ![postive-gradient](https://i.imgur.com/hFYoVgU.png)
#
# If a gradient element is **negative**:
# * **increasing** the element's value slightly will **decrease** the loss.
# * **decreasing** the element's value slightly will **increase** the loss.
#
# ![negative=gradient](https://i.imgur.com/w3Wii7C.png)
#
# The increase or decrease in loss by changing a weight element is proportional to the value of the gradient of the loss w.r.t. that element. This forms the basis for the optimization algorithm that we'll use to improve our model.
# Before we proceed, we reset the gradients to zero by calling the `.zero_()` method. We need to do this because PyTorch accumulates gradients, i.e. the next time we call `.backward` on the loss, the new gradient values will get added to the existing gradient values, which may lead to unexpected results.
w.grad.zero_()
b.grad.zero_()
print(w.grad)
print(b.grad)
# ## Adjust weights and biases using gradient descent
#
# We'll reduce the loss and improve our model using the gradient descent optimization algorithm, which has the following steps:
#
# 1. Generate predictions
#
# 2. Calculate the loss
#
# 3. Compute gradients w.r.t the weights and biases
#
# 4. Adjust the weights by subtracting a small quantity proportional to the gradient
#
# 5. Reset the gradients to zero
#
# Let's implement the above step by step.
# Generate predictions
preds = model(inputs)
print(preds)
# Note that the predictions are the same as before, since we haven't made any changes to our model. The same holds true for the loss and gradients.
# Calculate the loss
loss = mse(preds, targets)
print(loss)
# Compute gradients
loss.backward()
print(w.grad)
print(b.grad)
# Finally, we update the weights and biases using the gradients computed above.
# Adjust weights & reset gradients
with torch.no_grad():
w -= w.grad * 1e-5
b -= b.grad * 1e-5
w.grad.zero_()
b.grad.zero_()
# A few things to note above:
#
# * We use `torch.no_grad` to indicate to PyTorch that we shouldn't track, calculate or modify gradients while updating the weights and biases.
#
# * We multiply the gradients with a really small number (`10^-5` in this case), to ensure that we don't modify the weights by a really large amount, since we only want to take a small step in the downhill direction of the gradient. This number is called the *learning rate* of the algorithm.
#
# * After we have updated the weights, we reset the gradients back to zero, to avoid affecting any future computations.
# Let's take a look at the new weights and biases.
print(w)
print(b)
# With the new weights and biases, the model should have lower loss.
# Calculate loss
preds = model(inputs)
loss = mse(preds, targets)
print(loss)
# We have already achieved a significant reduction in the loss, simply by adjusting the weights and biases slightly using gradient descent.
# ## Train for multiple epochs
#
# To reduce the loss further, we can repeat the process of adjusting the weights and biases using the gradients multiple times. Each iteration is called an epoch. Let's train the model for 100 epochs.
# Train for 100 epochs
for i in range(100):
preds = model(inputs)
loss = mse(preds, targets)
loss.backward()
with torch.no_grad():
w -= w.grad * 1e-5
b -= b.grad * 1e-5
w.grad.zero_()
b.grad.zero_()
# Once again, let's verify that the loss is now lower:
# Calculate loss
preds = model(inputs)
loss = mse(preds, targets)
print(loss)
# As you can see, the loss is now much lower than what we started out with. Let's look at the model's predictions and compare them with the targets.
# Predictions
preds
# Targets
targets
# The predictions are now quite close to the target variables, and we can get even better results by training for a few more epochs.
#
# At this point, we can save our notebook and upload it to [Jovian.ml](https://www.jovian.ml) for future reference and sharing.
# !pip install jovian --upgrade -q
import jovian
jovian.commit()
# `jovian.commit` uploads the notebook to [Jovian.ml](https://www.jovian.ml), captures the Python environment and creates a sharable link for the notebook. You can use this link to share your work and let anyone reproduce it easily with the `jovian clone` command. Jovian also includes a powerful commenting interface, so you (and others) can discuss & comment on specific parts of your notebook:
#
# ![commenting on jovian](https://cdn-images-1.medium.com/max/1600/1*b4snnr_5Ve5Nyq60iDtuuw.png)
# ## Linear regression using PyTorch built-ins
#
# The model and training process above were implemented using basic matrix operations. But since this is such a common pattern, PyTorch has several built-in functions and classes to make it easy to create and train models.
#
# Let's begin by importing the `torch.nn` package from PyTorch, which contains utility classes for building neural networks.
import torch.nn as nn
# As before, we represent the inputs and targets as matrices.
# +
# Input (temp, rainfall, humidity)
inputs = np.array([[73, 67, 43], [91, 88, 64], [87, 134, 58],
[102, 43, 37], [69, 96, 70], [73, 67, 43],
[91, 88, 64], [87, 134, 58], [102, 43, 37],
[69, 96, 70], [73, 67, 43], [91, 88, 64],
[87, 134, 58], [102, 43, 37], [69, 96, 70]],
dtype='float32')
# Targets (apples, oranges)
targets = np.array([[56, 70], [81, 101], [119, 133],
[22, 37], [103, 119], [56, 70],
[81, 101], [119, 133], [22, 37],
[103, 119], [56, 70], [81, 101],
[119, 133], [22, 37], [103, 119]],
dtype='float32')
inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)
# -
# We are using 15 training examples this time, to illustrate how to work with large datasets in small batches.
# ## Dataset and DataLoader
#
# We'll create a `TensorDataset`, which allows access to rows from `inputs` and `targets` as tuples, and provides standard APIs for working with many different types of datasets in PyTorch.
from torch.utils.data import TensorDataset
# Define dataset
train_ds = TensorDataset(inputs, targets)
train_ds[0:3]
# The `TensorDataset` allows us to access a small section of the training data using the array indexing notation (`[0:3]` in the above code). It returns a tuple (or pair), in which the first element contains the input variables for the selected rows, and the second contains the targets.
# We'll also create a `DataLoader`, which can split the data into batches of a predefined size while training. It also provides other utilities like shuffling and random sampling of the data.
from torch.utils.data import DataLoader
# Define data loader
batch_size = 5
train_dl = DataLoader(train_ds, batch_size, shuffle=True)
# The data loader is typically used in a `for-in` loop. Let's look at an example.
for xb, yb in train_dl:
print(xb)
print(yb)
break
# In each iteration, the data loader returns one batch of data, with the given batch size. If `shuffle` is set to `True`, it shuffles the training data before creating batches. Shuffling helps randomize the input to the optimization algorithm, which can lead to faster reduction in the loss.
# ## nn.Linear
#
# Instead of initializing the weights & biases manually, we can define the model using the `nn.Linear` class from PyTorch, which does it automatically.
# Define model
model = nn.Linear(3, 2)
print(model.weight)
print(model.bias)
# PyTorch models also have a helpful `.parameters` method, which returns a list containing all the weights and bias matrices present in the model. For our linear regression model, we have one weight matrix and one bias matrix.
# Parameters
list(model.parameters())
# We can use the model to generate predictions in the exact same way as before:
# Generate predictions
preds = model(inputs)
preds
# ## Loss Function
#
# Instead of defining a loss function manually, we can use the built-in loss function `mse_loss`.
# Import nn.functional
import torch.nn.functional as F
# The `nn.functional` package contains many useful loss functions and several other utilities.
# Define loss function
loss_fn = F.mse_loss
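# For reference, `F.mse_loss` (with its default reduction) is simply the mean of the squared
# differences between predictions and targets. A quick sanity check of that equivalence:
torch.allclose(loss_fn(model(inputs), targets), ((model(inputs) - targets) ** 2).mean())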
# Let's compute the loss for the current predictions of our model.
loss = loss_fn(model(inputs), targets)
print(loss)
# ## Optimizer
#
# Instead of manually manipulating the model's weights & biases using gradients, we can use the optimizer `optim.SGD`. SGD stands for `stochastic gradient descent`. It is called `stochastic` because samples are selected in batches (often with random shuffling) instead of as a single group.
# Define optimizer
opt = torch.optim.SGD(model.parameters(), lr=1e-5)
# Note that `model.parameters()` is passed as an argument to `optim.SGD`, so that the optimizer knows which matrices should be modified during the update step. Also, we can specify a learning rate which controls the amount by which the parameters are modified.
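# For intuition, plain SGD's `opt.step()` and `opt.zero_grad()` amount to roughly the following
# (a simplified sketch, not PyTorch's actual implementation; the guard skips parameters whose
# gradients have not been computed yet):
with torch.no_grad():
    for param in model.parameters():
        if param.grad is not None:
            param -= 1e-5 * param.grad  # opt.step(): p <- p - lr * grad
            param.grad.zero_()          # opt.zero_grad(): clear accumulated gradients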
# ## Train the model
#
# We are now ready to train the model. We'll follow the exact same process to implement gradient descent:
#
# 1. Generate predictions
#
# 2. Calculate the loss
#
# 3. Compute gradients w.r.t the weights and biases
#
# 4. Adjust the weights by subtracting a small quantity proportional to the gradient
#
# 5. Reset the gradients to zero
#
# The only change is that we'll work with batches of data, instead of processing the entire training data in every iteration. Let's define a utility function `fit` which trains the model for a given number of epochs.
# Utility function to train the model
def fit(num_epochs, model, loss_fn, opt):
# Repeat for given number of epochs
for epoch in range(num_epochs):
# Train with batches of data
for xb,yb in train_dl:
# 1. Generate predictions
pred = model(xb)
# 2. Calculate loss
loss = loss_fn(pred, yb)
# 3. Compute gradients
loss.backward()
# 4. Update parameters using gradients
opt.step()
# 5. Reset the gradients to zero
opt.zero_grad()
# Print the progress
if (epoch+1) % 10 == 0:
print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))
# Some things to note above:
#
# * We use the data loader defined earlier to get batches of data for every iteration.
#
# * Instead of updating parameters (weights and biases) manually, we use `opt.step` to perform the update, and `opt.zero_grad` to reset the gradients to zero.
#
# * We've also added a log statement which prints the loss from the last batch of data for every 10th epoch, to track the progress of training. `loss.item` returns the actual value stored in the loss tensor.
#
# Let's train the model for 100 epochs.
fit(100, model, loss_fn, opt)
# Let's generate predictions using our model and verify that they're close to our targets.
# Generate predictions
preds = model(inputs)
preds
# Compare with targets
targets
# Indeed, the predictions are quite close to our targets, and now we have a fairly good model to predict crop yields for apples and oranges by looking at the average temperature, rainfall and humidity in a region.
# ## Commit and update the notebook
#
# As a final step, we can record a new version of the notebook using the `jovian` library.
import jovian
jovian.commit()
# Note that running `jovian.commit` a second time records a new version of your existing notebook. With Jovian.ml, you can avoid creating copies of your Jupyter notebooks and keep versions organized. Jovian also provides a visual diff ([example](https://jovian.ml/aakashns/keras-mnist-jovian/diff?base=8&remote=2)) so you can inspect what has changed between different versions:
#
# ![jovian-commenting](https://i.imgur.com/HF1cOVt.png)
# ## Further Reading
#
# We've covered a lot of ground in this tutorial, including *linear regression* and the *gradient descent* optimization algorithm. Here are a few resources if you'd like to dig deeper into these topics:
#
# * For a more detailed explanation of derivatives and gradient descent, see [these notes from a Udacity course](https://storage.googleapis.com/supplemental_media/udacityu/315142919/Gradient%20Descent.pdf).
#
# * For an animated visualization of how linear regression works, [see this post](https://hackernoon.com/visualizing-linear-regression-with-pytorch-9261f49edb09).
#
# * For a more mathematical treatment of matrix calculus, linear regression and gradient descent, you should check out [<NAME>'s excellent course notes](https://github.com/Cleo-Stanford-CS/CS229_Notes/blob/master/lectures/cs229-notes1.pdf) from CS229 at Stanford University.
#
# * To practice and test your skills, you can participate in the [Boston Housing Price Prediction](https://www.kaggle.com/c/boston-housing) competition on Kaggle, a website that hosts data science competitions.
# With this, we complete our discussion of linear regression in PyTorch, and we’re ready to move on to the next topic: *Logistic regression*.
| linear-regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# <h1>On Galerkin approximations for the QG equations</h1>
# <h2>Supplementary materials</h2>
# <p></p>
# <h3><NAME>*</h3>
# <h3><NAME>, and <NAME></h3>
# <p></p>
# <h4>Winter 2015</h4>
# <p></p>
# *Scripps Institution of Oceanography, University of California, San Diego, 9500 Gilman Dr. MC 0213, La Jolla, CA/USA, <<EMAIL>>
#
#
#
# ### The Eady problem
# <li><a href="http://nbviewer.ipython.org/github/crocha700/qg_vertical_modes/blob/master/eady_problem/linear_eady_galerk.ipynb">Base-state and linear stability analysis</a></li>
# <p></p>
# <li><a href="http://nbviewer.ipython.org/github/crocha700/qg_vertical_modes/blob/master/eady_problem/eady_base_state_pv.ipynb">The Eady base-state approximate PV</a></li>
# <p></p>
# <li><a href="http://nbviewer.ipython.org/github/crocha700/qg_vertical_modes/blob/master/eady_problem/wave_structure.ipynb">Approximation A spurius unstable modes: wave structure in the $(x,z)$-plane</a></li>
# <p></p>
# <p>Output data (click on "view raw" to download)</p>
# <p></p>
# <li><a href="https://github.com/crocha700/qg_vertical_modes/blob/master/eady_problem/linear_eady_A.npz">Approximation A</a></li>
# <p></p>
# <li><a href="https://github.com/crocha700/qg_vertical_modes/blob/master/eady_problem/linear_eady_C.npz">Approximation C</a></li>
# ### The $\beta$-Eady problem
# <li><a href="http://nbviewer.ipython.org/github/crocha700/qg_vertical_modes/blob/master/beta_eady_problem/beta_eady_galerk.ipynb">The $\beta$-Eady linear stability analysis</a></li>
# <p></p>
# <li><a href="http://nbviewer.ipython.org/github/crocha700/qg_vertical_modes/blob/master/beta_eady_problem/beta_eady_galerk.ipynb">Finite differences solution: Charney mode wave structure in the $(x,z)$-plane</a></li>
#
# <p></p>
# <p>Output data (click on "view raw" to download)</p>
# <p></p>
# <p></p>
# <li><a href="https://github.com/crocha700/qg_vertical_modes/blob/master/beta_eady_problem/outputs/beta-eady_A.npz">Approximation A</a></li>
# <p></p>
# <li><a href="https://github.com/crocha700/qg_vertical_modes/blob/master/beta_eady_problem/outputs/beta-eady_B.npz">Approximation B</a></li>
# <p></p>
# <li><a href="https://github.com/crocha700/qg_vertical_modes/blob/master/beta_eady_problem/outputs/beta-eady_C.npz">Approximation C</a></li>
# <p></p>
# <li><a href="https://github.com/crocha700/qg_vertical_modes/blob/master/beta_eady_problem/outputs/beta-eady_num.npz">Finite differences</a></li>
# ### Extras
# <a href="http://nbviewer.ipython.org/github/crocha700/qg_vertical_modes/blob/master/elementary_example/elementary_example.ipynb">Can we get a sine out of cosines? An elementary example of Galerkin approximation</a></li>
# <p></p>
# <a href="./supplementary_galerkin_qg.pdf">Algebra details</a></li>
| index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import glob
import logging
import pandas as pd
import os
import json
from ml4ir.base.io import file_io
from ml4ir.base.data import tfrecord_writer
from sklearn.datasets import load_iris
from ml4ir.base.features.feature_config import parse_config
from ml4ir.base.features.feature_config import ExampleFeatureConfig
from ml4ir.base.config.keys import TFRecordTypeKey
# Setup logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging.debug("Logger is initialized...")
# -
# Create a dataframe for iris
df = load_iris()
df.feature_names = [x[0]+x.split()[1] for x in df.feature_names] # making feature names shorter, e.g., sepal length (cm) -> slength
data = pd.DataFrame(df.data, columns=df.feature_names)
data['label'] = df['target']
data['query_key'] = data.index
data.head()
feature_config_yaml = '''
query_key:
name: query_key
node_name: query_key
trainable: false
dtype: int64
log_at_inference: true
feature_layer_info:
type: numeric
shape: null
serving_info:
required: false
default_value: 0
tfrecord_type: context
label:
name: label
node_name: label
trainable: false
dtype: int64
log_at_inference: true
feature_layer_info:
type: numeric
shape: null
serving_info:
required: false
default_value: 0
tfrecord_type: sequence
features:
- name: slength
node_name: slength
trainable: true
dtype: float
log_at_inference: false
feature_layer_info:
type: numeric
shape: null
serving_info:
required: true
default_value: 0.0
tfrecord_type: sequence
- name: swidth
node_name: swidth
trainable: true
dtype: float
log_at_inference: false
feature_layer_info:
type: numeric
shape: null
serving_info:
required: true
default_value: 0.0
tfrecord_type: sequence
- name: plength
node_name: plength
trainable: true
dtype: float
log_at_inference: false
feature_layer_info:
type: numeric
shape: null
serving_info:
required: true
default_value: 0.0
tfrecord_type: sequence
- name: pwidth
node_name: pwidth
trainable: true
dtype: float
log_at_inference: false
feature_layer_info:
type: numeric
shape: null
serving_info:
required: true
default_value: 0.0
tfrecord_type: sequence
'''
feature_config: ExampleFeatureConfig = parse_config(TFRecordTypeKey.EXAMPLE, feature_config_yaml, logger=logger)
# +
# Save as TFRecord SequenceExample/Example
TFRECORD_DIR = '/tmp/classification/'
if not os.path.exists(TFRECORD_DIR):
os.makedirs(TFRECORD_DIR)
tfrecord_writer.write_from_df(data,
                              tfrecord_file=os.path.join(TFRECORD_DIR, 'file_0.tfrecord'),
                              feature_config=feature_config,
                              tfrecord_type=TFRecordTypeKey.EXAMPLE)
# Let's see what it looks like
data.head()
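# To inspect the serialized records themselves, one option is to parse the file back with
# TensorFlow's tf.data API (a sketch; assumes TensorFlow 2.x with eager execution enabled):
import tensorflow as tf
raw_ds = tf.data.TFRecordDataset(os.path.join(TFRECORD_DIR, 'file_0.tfrecord'))
for raw_record in raw_ds.take(1):
    example = tf.train.Example()
    example.ParseFromString(raw_record.numpy())
    print(example)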
| python/notebooks/iris_write_tf_records_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Regression
#
# Data: the [bike sharing dataset](http://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
bici_hora = pd.read_csv('datos/hour.csv')
bici_hora.shape
bici_hora.head()
bici_pred = bici_hora.drop(['instant', 'dteday', 'casual', 'registered'], axis=1)
# +
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor, RadiusNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import BaggingRegressor
# -
X_train, X_test, y_train, y_test = train_test_split(
    bici_pred.drop('cnt', axis=1),  # X (features)
    bici_pred.cnt,                  # y (target)
    test_size=0.2,                  # fraction held out for testing
    random_state=42)                # fix the random state (reproducibility)
# +
knn_grid = {
'n_neighbors': list(range(1, 10))
}
knn_cv = GridSearchCV(KNeighborsRegressor(), knn_grid,
scoring='neg_mean_squared_error')
knn_cv.fit(X_train, y_train)
# -
knn_cv.best_params_
knn_cv.best_score_
plt.scatter(knn_cv.predict(X_test), y_test, alpha=0.2)
plt.xlabel('Pred')
plt.ylabel('Real')
plt.plot([0,y_test.max()], [0,y_test.max()], 'r')
plt.show()
# ## Autoregression
bici_diario = pd.read_csv('datos/day.csv')
bici_diario.cnt.plot()
plt.show()
# +
from pandas.plotting import lag_plot
lag_plot(bici_diario.cnt)
plt.show()
# -
lag_plot(bici_diario.cnt, lag=2)
plt.show()
lag_plot(bici_diario.cnt, lag=15)
plt.show()
# +
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(bici_diario.cnt)
plt.show()
# -
from statsmodels.tsa.ar_model import AR
ts = AR(bici_diario.cnt.values, bici_diario.dteday.values)
# +
plt.figure(figsize=(20,8))
max_lag = 100
ts_bici = ts.fit(maxlag=max_lag)
bici_diario.cnt.plot()
plt.plot(range(max_lag, bici_diario.cnt.shape[0]), ts_bici.predict(), 'r')
plt.show()
| 6_Metodos_Regresion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Gibbs sampling in 2D
#
# This is BONUS content related to Day 22, where we introduce Gibbs sampling
#
# ## Random variables
#
# (We'll use 0-indexing so we have close alignment between math and python code)
#
# * 2D random variable $z = [z_0, z_1]$
# * each entry $z_d$ is a real scalar: $z_d \in \mathbb{R}$
#
# ## Target distribution
#
# \begin{align}
# p^*(z_0, z_1) = \mathcal{N}\left(
# \left[ \begin{array}{c}
# 0 \\ 0
# \end{array} \right],
# \left[
# \begin{array}{c c}
# 1 & 0.8 \\
# 0.8 & 2
# \end{array} \right] \right)
# \end{align}
#
# ## Key takeaways
#
# * New concept: 'Gibbs sampling', which just iterates between two conditional sampling distributions:
#
# \begin{align}
# z^{t+1}_0 &\sim p^* (z_0 | z_1 = z^t_1) \\
# z^{t+1}_1 &\sim p^* (z_1 | z_0 = z^{t+1}_0)
# \end{align}
#
# ## Things to remember
#
# This is a simple example to illustrate the idea of how Gibbs sampling works.
#
# There are other "better" ways of sampling from a 2d normal.
#
# # Setup
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("notebook", font_scale=2.0)
# # Step 1: Prepare for Gibbs sampling
#
# ## Define functions to sample from target's conditionals
#
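# For the zero-mean bivariate normal target above, with covariance
# $\Sigma = \left[ \begin{array}{c c} 1 & 0.8 \\ 0.8 & 2 \end{array} \right]$,
# the standard conditional-Gaussian formulas (the Bishop textbook formulas referenced in the
# code comments below) give:
#
# \begin{align}
# p^*(z_0 | z_1) &= \mathcal{N}\left( \frac{0.8}{2} z_1, \; 1 - \frac{0.8^2}{2} \right) = \mathcal{N}(0.4 \, z_1, \; 0.68) \\
# p^*(z_1 | z_0) &= \mathcal{N}\left( \frac{0.8}{1} z_0, \; 2 - \frac{0.8^2}{1} \right) = \mathcal{N}(0.8 \, z_0, \; 1.36)
# \end{align}
#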
def draw_z0_given_z1(z1, random_state):
## First, use Bishop textbook formulas to compute the conditional mean/var
mean_01 = 0.4 * z1
var_01 = 0.68
## Then, use simple transform to obtain a sample from this conditional
## Remember, if u ~ Normal(0, 1), a "standard" normal with mean 0 variance 1,
## then using transform: x <- T(u), with T(u) = \mu + \sigma * u
## we can say x ~ Normal(\mu, \sigma^2)
u_samp = random_state.randn()
z0_samp = mean_01 + np.sqrt(var_01) * u_samp
return z0_samp
def draw_z1_given_z0(z0, random_state):
## First, use Bishop textbook formulas to compute conditional mean/var
mean_10 = 0.8 * z0
var_10 = 1.36
## Then, use simple transform to obtain a sample from this conditional
## Remember, if u ~ Normal(0, 1), a "standard" normal with mean 0 variance 1,
## then using transform: x <- T(u), with T(u) = \mu + \sigma * u
## we can say x ~ Normal(\mu, \sigma^2)
u_samp = random_state.randn()
z1_samp = mean_10 + np.sqrt(var_10) * u_samp
return z1_samp
# # Step 2: Execute the Gibbs sampling algorithm
#
# Perform 6000 iterations.
#
# Discard the first 1000 as "not yet burned in".
# +
S = 6000
sample_list = list()
z_D = np.zeros(2)
random_state = np.random.RandomState(0) # reproducible random seeds
for t in range(S):
z_D[0] = draw_z0_given_z1(z_D[1], random_state)
z_D[1] = draw_z1_given_z0(z_D[0], random_state)
    if t >= 1000:
sample_list.append(z_D.copy()) # save copies so we get different vectors
# -
z_samples_SD = np.vstack(sample_list)
# ## Step 3: Compare to samples from built-in routines for 2D MVNormal sampling
Cov_22 = np.asarray([[1.0, 0.8], [0.8, 2.0]])
true_samples_SD = random_state.multivariate_normal(np.zeros(2), Cov_22, size=S-1000)
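# As an aside, another direct (non-MCMC) way to draw correlated normal samples is via a
# Cholesky factor L of the covariance: if u ~ Normal(0, I), then L @ u ~ Normal(0, L L^T).
chol_L = np.linalg.cholesky(Cov_22)
chol_samples_SD = (chol_L @ random_state.randn(2, S - 1000)).T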
# +
fig, ax_grid = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True, figsize=(10,4))
ax_grid[0].plot(z_samples_SD[:,0], z_samples_SD[:,1], 'k.')
ax_grid[0].set_title('Gibbs sampler')
ax_grid[0].set_aspect('equal', 'box');
ax_grid[1].plot(true_samples_SD[:,0], true_samples_SD[:,1], 'k.')
ax_grid[1].set_title('np.random.multivariate_normal')
ax_grid[1].set_aspect('equal', 'box');
ax_grid[1].set_xlim([-6, 6]);
ax_grid[1].set_ylim([-6, 6]);
# -
| notebooks/GibbsSampling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from falass import readwrite, job, sld, reflect, compare, analysis
# read in the pdb, lgt, and dat files
# this particular simulation had the water at the bottom of the
# simulation cell so it is necessary to flip the cell
lipid_files = readwrite.Files('example.pdb', lgtfile='example.lgt', datfile='example.dat', flip=True)
# after defining the files above these lines will read them in
lipid_files.read_pdb()
lipid_files.read_lgt()
lipid_files.read_dat()
# these files are then defined within the job, along with the desired
# layer thickness and cut-off
lipid_job = job.Job(lipid_files, 1., 5.)
# the pdb and lgt files are compared to ensure that all atom types in the pdb
# have a scattering length associated with them
lipid_job.set_lgts()
# the job is then passed to the SLD class from which the SLD is calculated for
# each timestep
lipid_sld = sld.SLD(lipid_job)
lipid_sld.get_sld_profile()
lipid_sld.average_sld_profile()
# the SLD profile can be plotted
plt1 = lipid_sld.plot_sld_profile()
plt1.show()
# to get the reflectometry the sld profile is passed to the reflect class
# along with the experimental data (from which the q-vectors are taken)
lipid_reflect = reflect.Reflect(lipid_sld.sld_profile, lipid_files.expdata)
# the reflectometry is calculated for each timestep and averaged
lipid_reflect.calc_ref()
lipid_reflect.average_ref()
# the calculated reflectivity can then be plotted
plt2 = lipid_reflect.plot_ref(rq4=False)
plt2.show()
# to compare with the experimental data we use the compare class, and define
# starting scale and background values
lipid_compare = compare.Compare(lipid_files.expdata, lipid_reflect.averagereflect, 1e-1, 1e-6)
# the scale and background are fitted and the fitted, calculated reflectometry
# data is returned
lipid_compare.fit()
lipid_compare.return_fitted()
# this can then be plotted
plt3 = lipid_compare.plot_compare()
plt3.show()
| example/example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow as tf
tf.enable_eager_execution()
# +
x = tf.ones((2, 2))
with tf.GradientTape() as t:
t.watch(x)
y = tf.reduce_sum(x)
z = tf.multiply(y, y)
# Derivative of z with respect to the original input tensor x
dz_dx = t.gradient(z, x)
for i in [0, 1]:
for j in [0, 1]:
assert dz_dx[i][j].numpy() == 8.0
# +
x = tf.ones((2, 2))
with tf.GradientTape() as t:
t.watch(x)
y = tf.reduce_sum(x)
z = tf.multiply(y, y)
# Use the tape to compute the derivative of z with respect to the
# intermediate value y.
dz_dy = t.gradient(z, y)
assert dz_dy.numpy() == 8.0
# -
x = tf.constant(3.0)
with tf.GradientTape(persistent=True) as t:
t.watch(x)
y = x * x
z = y * y
dz_dx = t.gradient(z, x) # 108.0 (4*x^3 at x = 3)
dy_dx = t.gradient(y, x) # 6.0
del t # Drop the reference to the tape
# +
def f(x, y):
output = 1.0
for i in range(y):
if i > 1 and i < 5:
output = tf.multiply(output, x)
return output
def grad(x, y):
with tf.GradientTape() as t:
t.watch(x)
out = f(x, y)
return t.gradient(out, x)
x = tf.convert_to_tensor(2.0)
assert grad(x, 6).numpy() == 12.0
assert grad(x, 5).numpy() == 12.0
assert grad(x, 4).numpy() == 4.0
# +
x = tf.Variable(1.0) # Create a Tensorflow variable initialized to 1.0
with tf.GradientTape() as t:
with tf.GradientTape() as t2:
y = x * x * x
# Compute the gradient inside the 't' context manager
# which means the gradient computation is differentiable as well.
dy_dx = t2.gradient(y, x)
d2y_dx2 = t.gradient(dy_dx, x)
assert dy_dx.numpy() == 3.0
assert d2y_dx2.numpy() == 6.0
| Automatic differentiation and gradient tape.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.12 (''venv'': venv)'
# language: python
# name: python3
# ---
# + [markdown] cell_id="52eafc730e2d49ad965fccf305983a27" deepnote_cell_height=115.96875 deepnote_cell_type="markdown" tags=[]
# # Our data
#
# We'll use the data from the post from which this tutorial was derived. It contains sales information for a number of companies.
# + cell_id="01bbb3a1-6c4c-4dfa-b285-f351871891f5" deepnote_cell_height=423 deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=925 execution_start=1652108697250 source_hash="fb6f3ff4" tags=[]
import numpy as np
import matplotlib.pyplot as plt
data = {
'Barton LLC': 109438.50,
'<NAME>': 103569.59,
'<NAME>': 112214.71,
'Jerde-Hilpert': 112591.43,
'Keeling LLC': 100934.30,
'Koepp Ltd': 103660.54,
'Kulas Inc': 137351.96,
'Trantow-Barrows': 123381.38,
'White-Trantow': 135841.99,
'Will LLC': 104437.60
}
group_data = list(data.values())
group_names = list(data.keys())
group_mean = np.mean(group_data)
# + [markdown] cell_id="725fb61be9f54da0bc31d27cc931e670" deepnote_cell_height=178.34375 deepnote_cell_type="markdown" tags=[]
# ## Getting started
#
# This data is naturally visualized as a barplot, with one bar per group. To do this with the object-oriented approach, we first generate an instance of **figure.Figure** and **axes.Axes**. The Figure is like a canvas, and the Axes is a part of that canvas on which we will make a particular visualization.
# + cell_id="5659486d3b834aa6a9e2c1300a0bdaf0" deepnote_cell_height=393.796875 deepnote_cell_type="code" deepnote_output_heights=[21.1875, 226.609375] deepnote_to_be_reexecuted=false execution_millis=271 execution_start=1652108701287 source_hash="d5252ee5" tags=[]
fig, ax = plt.subplots()
ax.barh(group_names, group_data)
# + [markdown] cell_id="41636e3f195c469184d71fa0fa852286" deepnote_cell_height=133.5625 deepnote_cell_type="markdown" tags=[]
# ## Controlling the style
#
# There are many styles available in Matplotlib in order to let you tailor your visualization to your needs. To see a list of styles, we can use **style**.
# + cell_id="1732b828f50344799ab492e15799e875" deepnote_cell_height=116.1875 deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=369 execution_start=1652108721262 source_hash="98dea33d" tags=[]
print(plt.style.available)
# + [markdown] cell_id="082aa6b75bb148cbb08000c76225d0eb" deepnote_cell_height=74.78125 deepnote_cell_type="markdown" tags=[]
# Now let's remake the above plot by activating **fivethirtyeight** style to see how it looks
# + cell_id="fc1ff798cfb3453db62c74ce6b035e88" deepnote_cell_height=387.125 deepnote_cell_type="code" deepnote_output_heights=[21.1875, 201.9375] deepnote_to_be_reexecuted=false execution_millis=1005 execution_start=1652108793784 source_hash="cb9ff4db" tags=[]
plt.style.use('fivethirtyeight')
fig, ax = plt.subplots()
ax.barh(group_names, group_data)
# + [markdown] cell_id="81a9dc8ccd344be7afe0f97ccc658a5e" deepnote_cell_height=155.953125 deepnote_cell_type="markdown" tags=[]
# ## Customizing the plot
#
# Now we've got a plot with the general look that we want, so let's fine-tune it so that it's ready for print. First let's rotate the labels on the x-axis so that they show up more clearly. We can gain access to these labels with the **axes.Axes.get_xticklabels()** method.
# + cell_id="0b21fcd68bd34942ae31ae4ae4020e25" deepnote_cell_height=334.9375 deepnote_cell_type="code" deepnote_output_heights=[201.9375] deepnote_to_be_reexecuted=false execution_millis=253 execution_start=1652108827115 source_hash="4cb4dc7" tags=[]
fig, ax = plt.subplots()
ax.barh(group_names, group_data)
labels = ax.get_xticklabels()
# + [markdown] cell_id="4b66da118f7849928b0fcc4737f325ae" deepnote_cell_height=119.5625 deepnote_cell_type="markdown" tags=[]
# If we'd like to set the property of many items at once, it's useful to use the **pyplot.setp()** function. This will take a list (or many lists) of Matplotlib objects, and attempt to set some style element of each one.
# + cell_id="4f68bed2659346fbbc83fa0ec94dd56b" deepnote_cell_height=761.875 deepnote_cell_type="code" deepnote_output_heights=[347.375, 232.5] deepnote_to_be_reexecuted=false execution_millis=258 execution_start=1652108878435 source_hash="c6d175f1" tags=[]
fig, ax = plt.subplots()
ax.barh(group_names, group_data)
labels = ax.get_xticklabels()
plt.setp(labels, rotation=45, horizontalalignment='right')
# + [markdown] cell_id="b68c32ea2ed8430d8e57ae1a9ddc39b7" deepnote_cell_height=141.953125 deepnote_cell_type="markdown" tags=[]
# It looks like this cut off some of the labels on the bottom. We can tell Matplotlib to automatically make room for elements in the figures that we create. To do this we set the `autolayout` value of our rcParams; rcParams can also be used to control many other aspects of plot style and layout.
# + cell_id="3a04d4f0d498432585723711febdad5d" deepnote_cell_height=839.375 deepnote_cell_type="code" deepnote_output_heights=[347.375, 274] deepnote_to_be_reexecuted=false execution_millis=337 execution_start=1652108975522 source_hash="44903ca6" tags=[]
plt.rcParams.update({'figure.autolayout': True})
fig, ax = plt.subplots()
ax.barh(group_names, group_data)
labels = ax.get_xticklabels()
plt.setp(labels, rotation=45, horizontalalignment='right')
# + [markdown] cell_id="3c3d1b1753824e7f8f8358eb248d81c7" deepnote_cell_height=97.171875 deepnote_cell_type="markdown" tags=[]
# Next, we add labels to the plot. To do this with the OO interface, we can use the `Artist.set()` method to set properties of this Axes object.
# + cell_id="04a1fa1607ed46e7b0aba77aad0109bc" deepnote_cell_height=642.75 deepnote_cell_type="code" deepnote_output_heights=[78.75, 274] deepnote_to_be_reexecuted=false execution_millis=341 execution_start=1652109133290 source_hash="4429e000" tags=[]
fig, ax = plt.subplots()
ax.barh(group_names, group_data)
labels = ax.get_xticklabels()
plt.setp(labels, rotation=45, horizontalalignment='right')
ax.set(
xlim = [-10000, 140000],
xlabel = 'Total Revenue',
ylabel = 'Company',
title = 'Company Revenue'
)
# + [markdown] cell_id="23dd2f0a30784418a59b1b04e0500369" deepnote_cell_height=74.78125 deepnote_cell_type="markdown" tags=[]
# We can also adjust the size of this plot using the `pyplot.subplots()` function. We can do this with the *figsize* keyword argument.
# + cell_id="e8a23432a81e458082ed0e0249a6ab59" deepnote_cell_height=621.671875 deepnote_cell_type="code" deepnote_output_heights=[78.75, 252.921875] deepnote_to_be_reexecuted=false execution_millis=372 execution_start=1652109188464 source_hash="55b6735b" tags=[]
fig, ax = plt.subplots(figsize=(9, 5))
ax.barh(group_names, group_data)
labels = ax.get_xticklabels()
plt.setp(labels, rotation=45, horizontalalignment='right')
ax.set(
xlim = [-10000, 140000],
xlabel = 'Total Revenue',
ylabel = 'Company',
title = 'Company Revenue'
)
# + [markdown] cell_id="e0166d6c2c3a4fa5bd5f9c26c1dfc8e1" deepnote_cell_height=304.296875 deepnote_cell_type="markdown" tags=[]
# For labels, we can specify custom formatting guidelines in the form of functions. Below we define a function that takes an integer as input, and returns a string as an output. When used with **Axis.set_major_formatter** or **Axis.set_minor_formatter**, they will automatically create and use a **ticker.FuncFormatter** class.
#
# For this function, the `x` argument is the original tick label and `pos` is the tick position. We will only use `x` here but both arguments are needed.
#
# We can then apply this function to the labels on our plot. To do this, we use the `xaxis` attribute of our axes. This lets you perform actions on a specific axis on our plot.
# + cell_id="cd5e845b37194470a1d6d4d1ac47d150" deepnote_cell_height=695 deepnote_cell_type="code" deepnote_output_heights=[274] deepnote_to_be_reexecuted=false execution_millis=316 execution_start=1652109422813 source_hash="4191fe46" tags=[]
def currency(x, pos):
"""The two args are the value and tick position"""
if x >= 1e6:
s = '${:1.1f}M'.format(x*1e-6)
else:
s = '${:1.0f}K'.format(x*1e-3)
return s
fig, ax = plt.subplots()
ax.barh(group_names, group_data)
labels = ax.get_xticklabels()
plt.setp(labels, rotation=45, horizontalalignment='right')
ax.set(
xlim = [-10000, 140000],
xlabel = 'Total Revenue',
ylabel = 'Company',
title = 'Company Revenue'
)
ax.xaxis.set_major_formatter(currency)
# + [markdown] cell_id="5b658fde7bb145c1aa82efd6835faff5" deepnote_cell_height=257.5625 deepnote_cell_type="markdown" owner_user_id="<PASSWORD>" tags=[]
# ## Combining multiple visualizations
#
# It is possible to draw multiple plot elements on the same instance of **axes.Axes**. To do this we simply need to call another one of the plot methods on that axes object.
# + cell_id="af77630c21dd417d9afcae1605e866a7" deepnote_cell_height=642.03125 deepnote_cell_type="code" deepnote_output_heights=[149.03125] deepnote_to_be_reexecuted=false execution_millis=433 execution_start=1652110179372 source_hash="bce0208f" tags=[]
fig, ax = plt.subplots(figsize=(10, 8))
ax.barh(group_names, group_data)
labels = ax.get_xticklabels()
plt.setp(labels, rotation=45, horizontalalignment='right')
ax.axvline(group_mean, ls='--', color='r')
for group in [3, 5, 8]:
ax.text(145000, group, 'New Company', fontsize=10, verticalalignment='center')
ax.title.set(y=1.05)
ax.set(
xlim = [-10000, 140000],
xlabel = 'Total Revenue',
ylabel = 'Company',
title = 'Company Revenue'
)
ax.xaxis.set_major_formatter(currency)
ax.set_xticks([0, 25e3, 50e3, 75e3, 100e3, 125e3])
fig.subplots_adjust(right=.1)
plt.show()
| matplotlib tutorial/plot_lifecycle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Segmented deformable mirrors
#
# We will use segmented deformable mirrors and simulate the PSFs that result from segment pistons and tilts. We will compare this functionality against Poppy, another optical propagation package.
#
# First we'll import all packages.
import os
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
import hcipy
import poppy
# +
# Parameters for the pupil function
pupil_diameter = 0.019725 # m
gap_size = 90e-6 # m
num_rings = 3
segment_flat_to_flat = (pupil_diameter - (2 * num_rings + 1) * gap_size) / (2 * num_rings + 1)
focal_length = 1 # m
# Parameters for the simulation
num_pix = 1024
wavelength = 638e-9
num_airy = 20
sampling = 4
norm = False
# -
# ## Instantiate the segmented mirrors
#
# ### HCIPy SM: `hsm`
#
# We need to generate a pupil grid for the aperture, and a focal grid and propagator for the focal plane images after the DM.
# +
# HCIPy grids and propagator
pupil_grid = hcipy.make_pupil_grid(dims=num_pix, diameter=pupil_diameter)
focal_grid = hcipy.make_focal_grid(sampling, num_airy,
pupil_diameter=pupil_diameter,
reference_wavelength=wavelength,
focal_length=focal_length)
focal_grid = focal_grid.shifted(focal_grid.delta / 2)
prop = hcipy.FraunhoferPropagator(pupil_grid, focal_grid, focal_length)
# -
# We generate a segmented aperture for the segmented mirror. For convenience, we'll use the HiCAT pupil without spiders. The aperture is evaluated with `evaluate_supersampled`; a supersampling factor larger than the 1 used below would resolve the segment gaps more finely, at the cost of extra computation.
# +
aper, segments = hcipy.make_hexagonal_segmented_aperture(num_rings,
segment_flat_to_flat,
gap_size,
starting_ring=1,
return_segments=True)
aper = hcipy.evaluate_supersampled(aper, pupil_grid, 1)
segments = hcipy.evaluate_supersampled(segments, pupil_grid, 1)
plt.title('HCIPy aperture')
hcipy.imshow_field(aper, cmap='gray')
# -
# Now we make the segmented mirror. In order to be able to apply the SM to a plane, that plane needs to be a `Wavefront`, which combines a `Field` - here the aperture - with a wavelength, here `wavelength`.
#
# In this example here, since the SM doesn't have any extra effects on the pupil since it's still completely flat, we don't actually have to apply the SM, although of course we could.
# +
# Instantiate the segmented mirror
hsm = hcipy.SegmentedDeformableMirror(segments)
# Make a pupil plane wavefront from aperture
wf = hcipy.Wavefront(aper, wavelength)
# Apply SM if you want to
wf = hsm(wf)
plt.figure(figsize=(8, 8))
plt.title('Wavefront intensity at HCIPy SM')
hcipy.imshow_field(wf.intensity, cmap='gray')
plt.colorbar()
plt.show()
# -
# ### Poppy SM: `psm`
#
# We'll do the same for Poppy.
psm = poppy.dms.HexSegmentedDeformableMirror(name='Poppy SM',
rings=3,
flattoflat=segment_flat_to_flat*u.m,
gap=gap_size*u.m,
center=False)
# Display the transmission and phase of the poppy sm
plt.figure(figsize=(8, 8))
psm.display(what='amplitude')
# ## Create reference images
#
# ### HCIPy reference image
#
# We need to apply the SM to the wavefront in the pupil plane and then propagate it to the image plane.
# +
# Apply SM to pupil plane wf
wf_sm = hsm(wf)
# Propagate from SM to image plane
im_ref_hc = prop(wf_sm)
# +
# Display intensity and phase in image plane
plt.figure(figsize=(8, 8))
plt.suptitle('Image plane after HCIPy SM')
# Get normalization factor for HCIPy reference image
norm_hc = np.max(im_ref_hc.intensity)
hcipy.imshow_psf(im_ref_hc, normalization='peak')
# -
# ### Poppy reference image
#
# For the Poppy propagation, we need to make an optical system of which we then calculate the PSF. We match HCIPy's image scale with Poppy.
# +
# Make an optical system with the Poppy SM and a detector
psm.flatten()
pxscle = np.degrees(wavelength / pupil_diameter) * 3600 / sampling
fovarc = pxscle * 160
osys = poppy.OpticalSystem()
osys.add_pupil(psm)
osys.add_detector(pixelscale=pxscle, fov_arcsec=fovarc, oversample=1)
# +
# Calculate the PSF
psf = osys.calc_psf(wavelength)
plt.figure(figsize=(8, 8))
poppy.display_psf(psf, vmin=1e-9, vmax=0.1)
# Get the PSF as an array
im_ref_pop = psf[0].data
print('Poppy PSF shape: {}'.format(im_ref_pop.shape))
# Get normalization from Poppy reference image
norm_pop = np.max(im_ref_pop)
# -
# ### Both reference images side-by-side
# +
plt.figure(figsize=(15,6))
plt.subplot(1, 2, 1)
hcipy.imshow_field(np.log10(im_ref_hc.intensity / norm_hc), vmin=-10, cmap='inferno')
plt.title('HCIPy reference PSF')
plt.colorbar()
plt.subplot(1, 2, 2)
plt.imshow(np.log10(im_ref_pop / norm_pop), origin='lower', vmin=-10, cmap='inferno')
plt.title('Poppy reference PSF')
plt.colorbar()
# +
ref_dif = im_ref_pop / norm_pop - im_ref_hc.intensity.shaped / norm_hc
lims = np.max(np.abs(ref_dif))
plt.figure(figsize=(15, 6))
plt.suptitle(f'Maximum relative error: {lims:0.2g} relative to the peak intensity')
plt.subplot(1, 2, 1)
plt.imshow(ref_dif, origin='lower', vmin=-lims, vmax=lims, cmap='RdBu')
plt.title('Full image')
plt.colorbar()
plt.subplot(1, 2, 2)
plt.imshow(ref_dif[60:100,60:100], origin='lower', vmin=-lims, vmax=lims, cmap='RdBu')
plt.title('Zoomed in')
plt.colorbar()
# -
# ## Applying aberrations
# +
# Define function from rad of phase to m OPD
def aber_to_opd(aber_rad, wavelength):
aber_m = aber_rad * wavelength / (2 * np.pi)
return aber_m
aber_rad = 4.0
print('Aberration: {} rad'.format(aber_rad))
print('Aberration: {} m'.format(aber_to_opd(aber_rad, wavelength)))
# Poppy and HCIPy have a different way of indexing segments
# Figure out which index to poke on which mirror
poppy_index_to_hcipy_index = []
for n in range(1, num_rings + 1):
base = list(range(3 * (n - 1) * n + 1, 3 * n * (n + 1) + 1))
poppy_index_to_hcipy_index.extend(base[2 * n::-1])
poppy_index_to_hcipy_index.extend(base[:2 * n:-1])
poppy_index_to_hcipy_index = {j: i for i, j in enumerate(poppy_index_to_hcipy_index) if j is not None}
hcipy_index_to_poppy_index = {j: i for i, j in poppy_index_to_hcipy_index.items()}
# +
# Flatten both SMs just to be sure
hsm.flatten()
psm.flatten()
# Poking segment 35 and 25
for i in [35, 25]:
hsm.set_segment_actuators(i, aber_to_opd(aber_rad, wavelength) / 2, 0, 0)
psm.set_actuator(hcipy_index_to_poppy_index[i], aber_to_opd(aber_rad, wavelength) * u.m, 0, 0)
# Display both segmented mirrors in OPD
# HCIPy
plt.figure(figsize=(8,8))
plt.title('OPD for HCIPy SM')
hcipy.imshow_field(hsm.surface * 2, mask=aper, cmap='RdBu_r', vmin=-5e-7, vmax=5e-7)
plt.colorbar()
plt.show()
# Poppy
plt.figure(figsize=(8,8))
psm.display(what='opd')
plt.show()
# -
# ### Show focal plane images
# +
### HCIPy
# Apply SM to pupil plane wf
wf_fp_pistoned = hsm(wf)
# Propagate from SM to image plane
im_pistoned_hc = prop(wf_fp_pistoned)
### Poppy
# Calculate the PSF
psf = osys.calc_psf(wavelength)
# Get the PSF as an array
im_pistoned_pop = psf[0].data
### Display intensity of both cases image plane
plt.figure(figsize=(15, 6))
plt.suptitle('Image plane after SM for $\phi$ = ' + str(aber_rad) + ' rad')
plt.subplot(1, 2, 1)
hcipy.imshow_field(np.log10(im_pistoned_hc.intensity / norm_hc), cmap='inferno', vmin=-9)
plt.title('HCIPy pistoned pair')
plt.colorbar()
plt.subplot(1, 2, 2)
plt.imshow(np.log10(im_pistoned_pop / norm_pop), origin='lower', cmap='inferno', vmin=-9)
plt.title('Poppy pistoned pair')
plt.colorbar()
# -
# ## A mix of piston, tip and tilt (PTT)
# +
aber_rad_tt = 200e-6
aber_rad_p = 1.8
opd_piston = aber_to_opd(aber_rad_p, wavelength)
### Put aberrations on both SMs
# Flatten both SMs
hsm.flatten()
psm.flatten()
## PISTON
for i in [19, 28, 23, 16]:
hsm.set_segment_actuators(i, opd_piston / 2, 0, 0)
psm.set_actuator(hcipy_index_to_poppy_index[i], opd_piston * u.m, 0, 0)
for i in [3, 35, 30, 8]:
hsm.set_segment_actuators(i, -0.5 * opd_piston / 2, 0, 0)
psm.set_actuator(hcipy_index_to_poppy_index[i], -0.5 * opd_piston * u.m, 0, 0)
for i in [14, 18, 1, 32, 12]:
hsm.set_segment_actuators(i, 0.3 * opd_piston / 2, 0, 0)
psm.set_actuator(hcipy_index_to_poppy_index[i], 0.3 * opd_piston * u.m, 0, 0)
## TIP and TILT
for i in [2, 5, 11, 15, 22]:
hsm.set_segment_actuators(i, 0, aber_rad_tt / 2, 0.3 * aber_rad_tt / 2)
psm.set_actuator(hcipy_index_to_poppy_index[i], 0, aber_rad_tt, 0.3 * aber_rad_tt)
for i in [4, 6, 26]:
hsm.set_segment_actuators(i, 0, -aber_rad_tt / 2, 0)
psm.set_actuator(hcipy_index_to_poppy_index[i], 0, -aber_rad_tt, 0)
for i in [34, 31, 7]:
hsm.set_segment_actuators(i, 0, 0, 1.3 * aber_rad_tt / 2)
psm.set_actuator(hcipy_index_to_poppy_index[i], 0, 0, 1.3 * aber_rad_tt)
# +
# Display both segmented mirrors in OPD
# HCIPy
plt.figure(figsize=(8,8))
plt.title('OPD for HCIPy SM')
hcipy.imshow_field(hsm.surface * 2, mask=aper, cmap='RdBu_r', vmin=-5e-7, vmax=5e-7)
plt.colorbar()
plt.show()
# Poppy
plt.figure(figsize=(8,8))
psm.display(what='opd')
plt.show()
# +
### Propagate to image plane
## HCIPy
# Propagate from pupil plane through SM to image plane
im_pistoned_hc = prop(hsm(wf)).intensity
## Poppy
# Calculate the PSF
psf = osys.calc_psf(wavelength)
# Get the PSF as an array
im_pistoned_pop = psf[0].data
# +
### Display intensity of both cases image plane
plt.figure(figsize=(18, 9))
plt.suptitle('Image plane after SM for random arrangement')
plt.subplot(1, 2, 1)
hcipy.imshow_field(np.log10(im_pistoned_hc / norm_hc), cmap='inferno', vmin=-9)
plt.title('HCIPy random arrangement')
plt.colorbar()
plt.subplot(1, 2, 2)
plt.imshow(np.log10(im_pistoned_pop / norm_pop), origin='lower', cmap='inferno', vmin=-9)
plt.title('Poppy random arrangement')
plt.colorbar()
plt.show()
| doc/tutorial_notebooks/SegmentedDMs/SegmentedDMs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
# +
import matplotlib
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = False
matplotlib.rcParams['font.family'] = "Times New Roman"
lw, fs, fc, style = 2, 20, "#f0f0f0", 'seaborn-poster'
import pandas as pd
idx = pd.IndexSlice
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
# -
def value_to_annotate(val):
if pd.isnull(val):
return ''
else:
return '{:.2f}'.format(val)
source_dict = {'W': 'Weather',
'T': 'Twitter',
'H': 'HealthMap',
'F': 'GFT',
'S': 'GST'}
table1 = pd.read_csv('./table1.tex', sep=r'\s*&\s*', na_values=['N/A'],
engine='python', skiprows=1,
converters={'Sources': lambda x: source_dict[x]})
table1.set_index(['Model', 'Sources'], inplace=True)
table1
# Plotting only MFN.
# Scaling to 100%
used_table1 = table1.ix[idx['MFN', :], :]
used_table1 = used_table1 * 100 / 4.
used_table1
with plt.style.context((style)):
fig, ax = plt.subplots(figsize=(10,10))
ax.grid(False)
divider = make_axes_locatable(ax)
im = ax.imshow(used_table1.values, cmap=matplotlib.cm.OrRd,
interpolation='none',)
cax = divider.append_axes("right", size="5%", pad=0.2)
# yticks
ax.set_yticks(pd.np.arange(0, used_table1.shape[0]))
ax.set_yticklabels(used_table1.index, fontsize=fs)
# xticks
ax.set_xticks(pd.np.arange(0, used_table1.shape[1]))
ax.set_xticklabels(table1.columns, fontsize=fs, rotation='vertical')
# annotation
for y in range(used_table1.shape[0]):
for x in range(used_table1.shape[1]):
val = used_table1.iloc[y, x]
ax.text(x , y , value_to_annotate(val),
horizontalalignment='center',
verticalalignment='center',
color='darkgreen', fontsize=10)
ax.set_xlabel('Countries', fontsize=fs)
ax.set_ylabel('Data Sources', fontsize=fs)
fig.colorbar(im, cax=cax)
fig.tight_layout()
fig.savefig('../figures/singleSource.png', dpi=600, bbox_inches='tight')
# **Uncomment this to plot all methods.**
# ```python
# with plt.style.context((style)):
# fig, ax = plt.subplots(figsize=(16,10))
# ax.grid(False)
# cax = ax.imshow(table1.values, cmap=matplotlib.cm.Reds,
# interpolation='none')
# ax.axhline(4.5, lw=4, color='k')
# ax.axhline(9.5, lw=4, color='k')
#
# # yticks
# ax.set_yticks(pd.np.arange(0, table1.shape[0], 5) + 2.25)
# ax.set_yticks(pd.np.arange(0, table1.shape[0]), minor=True)
#
# ax.set_yticklabels(table1.index.get_level_values(1),
# fontsize=20, minor=True)
# ax.set_yticklabels([ x + '\n\n\n' for x in table1.index.levels[0]],
# rotation="vertical", fontsize=24)
#
#
# # xticks
# ax.set_xticks(pd.np.arange(0, table1.shape[1]))
# ax.set_xticklabels(table1.columns, fontsize=24, rotation='vertical')
#
# # annotation
# for y in range(table1.shape[0]):
# for x in range(table1.shape[1]):
# ax.text(x , y , '${:.2f}$'.format(table1.iloc[y, x]),
# horizontalalignment='center',
# verticalalignment='center',
# color=fc
# )
# ax.set_title('Single Source accuracy', fontsize=30)
# fig.colorbar(cax)
# fig.tight_layout()
# fig.savefig('singleSource.png')
# ```
with plt.style.context((style)):
fig, ax = plt.subplots(figsize=(16,8))
used_table1.T.plot(kind='bar', ax=ax, legend=False,
colormap=matplotlib.cm.RdYlGn_r)
ax.legend(loc='best',
bbox_to_anchor=(1., 1.05),
fontsize=30)
# ax.set_title('Barplot: SingleSource', fontsize=30)
ax.set_ylabel('Accuracy', fontsize=30)
    ax.set_xlabel('Countries', fontsize=30)
ax.set_ylim([0, 4])
ax.tick_params(axis='both', labelsize=24)
fig.tight_layout()
fig.savefig('./SingleSource_bar.png', bbox_inches='tight')
# ## Fusion
table2 = pd.read_csv('./table2.tex', sep=r'\s*&\s*', na_values=['N/A'],
engine='python', skiprows=1, index_col=0)
used_table2 = table2 * 100 / 4.0
# +
# Appending Single Source Best
singleMax = used_table1.max(axis=0)
singleMax.name = 'Single Source: Best'
saved_table2 = used_table2.append(singleMax)
# plotting table
plot_table2 = 100 * (used_table2 - singleMax) / singleMax
with plt.style.context((style)):
fig, ax = plt.subplots(figsize=(8, 6))
plot_table2.T.plot(kind='bar', ax=ax, legend=False)
ax.legend(loc='best', fontsize=30)
ax.tick_params(axis='both', labelsize=24)
ax.set_ylabel('Percent Increase in Accuracy', fontsize=30)
ax.set_xlabel('Countries', fontsize=30)
fig.tight_layout()
fig.savefig('../figures/ModelVsData.png', dpi=600) #, bbox_inches='tight'
# -
# **TO plot full bars**
# ```python
# with plt.style.context((style)):
# fig, ax = plt.subplots(figsize=(16,10))
#
# used_table2.T.plot(kind='bar', ax=ax, legend=False)
# ax.legend(loc='best',
# #bbox_to_anchor=(1.15, 0.9),
# fontsize=30)
# # ax.set_title('Model vs Data Fusion', fontsize=30)
# ax.set_ylim([0, 100.0])
#
# ax.set_ylabel('Percent Accuracy', fontsize=30)
# ax.set_xlabel('Countries', fontsize=30)
#
# ax.tick_params(axis='both', labelsize=24)
#
# fig.tight_layout()
# fig.savefig('./ModelVsData.png', dpi=300) #, bbox_inches='tight'
# ```
# ## Ablation
# +
table3 = pd.read_csv('./table3.tex', sep=r'\s*&\s*', na_values=['N/A'],
engine='python', skiprows=1, index_col=0)
shift_table2 = ((table3.iloc[0, :].values - table3.iloc[1:, :])
/ table3.iloc[0, :].values) * 100
shift_table2
# -
with plt.style.context((style)):
fig, ax = plt.subplots(figsize=(10,10))
ax.grid(False)
im = ax.imshow(shift_table2.values, cmap=matplotlib.cm.OrRd,
interpolation='none')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.2)
# yticks
ax.set_yticks(pd.np.arange(0, shift_table2.shape[0]))
ax.set_yticklabels(shift_table2.index, fontsize=fs)
# xticks
ax.set_xticks(pd.np.arange(0, table1.shape[1]))
ax.set_xticklabels(table1.columns, fontsize=fs, rotation='vertical')
# annotation
for y in range(shift_table2.shape[0]):
for x in range(shift_table2.shape[1]):
val = shift_table2.iloc[y, x]
ax.text(x , y , value_to_annotate(val),
horizontalalignment='center',
verticalalignment='center',
color='darkgreen', fontsize=10
)
fig.colorbar(im, cax=cax)
ax.set_ylabel('Ablated Source', fontsize=fs)
ax.set_xlabel('Countries', fontsize=fs)
fig.tight_layout()
fig.savefig('../figures/Ablation.png', bbox_inches='tight', dpi=600)
# # Correction
table4 = pd.read_csv('./table4.tex', sep=r'\s*&\s*', na_values=['N/A'],
engine='python', skiprows=1, index_col=0)
selected_table4 = table4.ix[['None', 'Combined'], :].T
selected_table4 = selected_table4 / 4.0 * 100
selected_table4
plot_table4 = ((selected_table4['Combined'] - selected_table4['None'])
/ selected_table4['Combined']) * 100
with plt.style.context((style)):
fig, ax = plt.subplots(figsize=(7, 5))
plot_table4.plot(kind='bar', ax=ax, legend=False, color='lightblue')
ax.set_ylabel('Percent Increase in Accuracy', fontsize=fs)
ax.set_xlabel('Countries', fontsize=fs)
ax.tick_params(axis='both', labelsize=fs)
fig.tight_layout()
fig.savefig('../figures/PahoCorrection.png', dpi=600)
# **to plot absolute values**
# ```python
# with plt.style.context((style)):
# fig, ax = plt.subplots(figsize=(16,10))
#
# selected_table4.plot(kind='bar', ax=ax, legend=False)
# ax.legend(['w/o Correction', 'w/ Correction'],
# loc='best',
# # bbox_to_anchor=(1.0, 0.9),
# fontsize=30)
#
# ax.set_ylim([0, 100.0])
#
# ax.set_ylabel('Percent Accuracy', fontsize=30)
# ax.set_xlabel('Countries', fontsize=30)
#
# ax.tick_params(axis='both', labelsize=24)
#
#
# fig.tight_layout()
# fig.savefig('./PahoCorrection.png', dpi=300) # ,bbox_inches='tight'
# ```
# # Flu Segregation
# +
table5 = pd.read_csv('./table5.tsv', delimiter='\t')
table5.set_index('Segregation Level', inplace=True)
plot_table5 = table5
plot_table5
# -
with plt.style.context((style)):
fig, ax = plt.subplots(figsize=(8,6))
plot_table5.plot(kind='bar', ax=ax, legend=False)
ax.legend(['Strain Segregated', 'Age Segregated'],
loc='lower right',
#bbox_to_anchor=(0.7, 0.9),
fontsize=30)
#ax.set_ylim([-4, 4])
ax.set_ylabel('Percent Increase in Accuracy', fontsize=30)
ax.set_xlabel('HHS Regions', fontsize=fs)
ax.tick_params(axis='both', labelsize=fs)
fig.tight_layout()
fig.savefig('../figures/FluSegregation.png', dpi=600, bbox_inches='tight')
# **to plot absolute value**
# ```python
# with plt.style.context((style)):
# fig, ax = plt.subplots(figsize=(16,10))
#
# used_table5.plot(kind='bar', ax=ax, legend=False)
# ax.legend(['Unsegregated', 'Strain Segregated', 'Age Segregated'],
# loc='best',
# bbox_to_anchor=(0.7, 0.9),
# fontsize=30)
#
# ax.set_ylim([0, 100.0])
#
# ax.set_ylabel('Percent Accuracy', fontsize=30)
# ax.set_xlabel('HHS Regions', fontsize=30)
#
# ax.tick_params(axis='both', labelsize=24)
#
#
# fig.tight_layout()
# fig.savefig('./FluSegregation.png', dpi=300, bbox_inches='tight')
# ```
| experiments/HowNotToPlots_Tables.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import json
# +
with open('dumping-instagram-6-july-2019.json') as fopen:
instagram = json.load(fopen)
len(instagram)
# -
instagram[0]
# +
with open('dumping-twitter-6-july-2019.json') as fopen:
twitter = json.load(fopen)
len(twitter)
# -
import cleaning
twitter = twitter + instagram
# +
# %%time
twitter = cleaning.multiprocessing(twitter, cleaning.cleaning_strings)
# +
# %%time
temp_vocab = list(set(cleaning.multiprocessing(twitter, cleaning.unique_words)))
# -
# %%time
temp_dict = cleaning.multiprocessing(temp_vocab, cleaning.duplicate_dots_marks_exclamations, list_mode = False)
print(len(temp_dict))
# +
# %%time
twitter = cleaning.multiprocessing_multiple(twitter, temp_dict, cleaning.string_dict_cleaning)
# -
# %%time
temp_dict = cleaning.multiprocessing(temp_vocab, cleaning.remove_underscore, list_mode = False)
print(len(temp_dict))
# +
# %%time
twitter = cleaning.multiprocessing_multiple(twitter, temp_dict, cleaning.string_dict_cleaning)
# -
# %%time
temp_dict = cleaning.multiprocessing(temp_vocab, cleaning.isolate_spamchars, list_mode = False)
print(len(temp_dict))
# +
# %%time
twitter = cleaning.multiprocessing_multiple(twitter, temp_dict, cleaning.string_dict_cleaning)
# -
# %%time
temp_dict = cleaning.multiprocessing(temp_vocab, cleaning.break_short_words, list_mode = False)
print(len(temp_dict))
# +
# %%time
twitter = cleaning.multiprocessing_multiple(twitter, temp_dict, cleaning.string_dict_cleaning)
# -
# %%time
temp_dict = cleaning.multiprocessing(temp_vocab, cleaning.break_long_words, list_mode = False)
print(len(temp_dict))
# +
# %%time
twitter = cleaning.multiprocessing_multiple(twitter, temp_dict, cleaning.string_dict_cleaning)
# -
# %%time
temp_dict = cleaning.multiprocessing(temp_vocab, cleaning.remove_ending_underscore, list_mode = False)
print(len(temp_dict))
# +
# %%time
twitter = cleaning.multiprocessing_multiple(twitter, temp_dict, cleaning.string_dict_cleaning)
# -
# %%time
temp_dict = cleaning.multiprocessing(temp_vocab, cleaning.remove_starting_underscore, list_mode = False)
print(len(temp_dict))
# +
# %%time
twitter = cleaning.multiprocessing_multiple(twitter, temp_dict, cleaning.string_dict_cleaning)
# -
# %%time
temp_dict = cleaning.multiprocessing(temp_vocab, cleaning.end_punct, list_mode = False)
print(len(temp_dict))
# +
# %%time
twitter = cleaning.multiprocessing_multiple(twitter, temp_dict, cleaning.string_dict_cleaning)
# -
# %%time
temp_dict = cleaning.multiprocessing(temp_vocab, cleaning.start_punct, list_mode = False)
print(len(temp_dict))
# +
# %%time
twitter = cleaning.multiprocessing_multiple(twitter, temp_dict, cleaning.string_dict_cleaning)
# -
# %%time
temp_dict = cleaning.multiprocessing(temp_vocab, cleaning.join_dashes, list_mode = False)
print(len(temp_dict))
# +
# %%time
twitter = cleaning.multiprocessing_multiple(twitter, temp_dict, cleaning.string_dict_cleaning)
# -
twitter[:100]
# +
import itertools
import re
from tqdm import tqdm
_list_laughing = {
'huhu',
'haha',
'gaga',
'hihi',
'wkawka',
'wkwk',
'kiki',
'keke',
'huehue',
}
def last_cleaning(string):
    # lowercase, collapse repeated spaces, and split into words
    string = re.sub(r'[ ]+', ' ', string.lower()).strip().split()
    # drop laughing expressions and words whose first half equals their second half
    # (simple reduplicated words)
    string = [
        word
        for word in string
        if not any([laugh in word for laugh in _list_laughing])
        and word[: len(word) // 2] != word[len(word) // 2 :]
    ]
    string = ' '.join(string)
    # collapse any run of the same character down to at most two occurrences
    string = (
        ''.join(''.join(s)[:2] for _, s in itertools.groupby(string))
    )
    return string
def last_cleaning_strings(strings):
for i in tqdm(range(len(strings))):
strings[i] = last_cleaning(strings[i])
return strings
# +
# %%time
twitter = cleaning.multiprocessing(twitter, last_cleaning_strings)
# -
twitter[:100]
with open('ms-socialmedia.txt', 'w') as fopen:
fopen.write(' '.join(twitter))
| pretrained-model/wordvector/social-media-preprocess.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
movie = pd.read_csv("./data/IMDB-Movie-Data.csv")
movie.head()
pd.notnull(movie).head()
np.all(pd.notnull(movie))
| numpytest/jupyter/test10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from statsmodels.stats.proportion import proportion_confint
from math import sqrt
from scipy.stats import norm
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
# -
X, y = load_breast_cancer(return_X_y=True)
len(X) == len(y)
train, test, train_labels, test_labels = train_test_split(X,
y,
test_size=0.33,
random_state=0)
dt = DecisionTreeClassifier(random_state=0)
dt.fit(train, train_labels)
preds = dt.predict(test)
tn, fp, fn, tp = confusion_matrix(y_true=test_labels, y_pred=preds).ravel()
print(f"{tn}\t{fp}\n{fn}\t{tp}")
# ---
#
# ${\displaystyle {\text{Precision}}={\frac {tp}{tp+fp}}}$
#
# ${\displaystyle {\text{Recall}}={\frac {tp}{tp+fn}}}$
#
# ${\displaystyle {\text{FPR}}={\frac {fp}{fp + tn}}}$
#
# ---
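# The confidence intervals below use the normal ("Wald") approximation for a binomial proportion: for an observed proportion $\hat{p}$ out of $n$ trials and significance level $\alpha$,
#
# ${\displaystyle {\hat{p}} \pm z_{1-\alpha/2}{\sqrt {\frac {{\hat{p}}\,(1-{\hat{p}})}{n}}}}$
#
# ---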
# +
# Binomial proportion confidence interval via normal approximation
# More info: https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval
def get_asymptotic_normal_approximation(metric, n, alpha=0.05):
z = norm.ppf(1 - (alpha / 2))
interval = z * sqrt((metric * (1 - metric)) / n)
upper = metric + interval
lower = metric - interval
return lower, upper
# +
precision = tp / (tp + fp)
print(f"Precision: {round(precision, 3)}")
recall = tp / (tp + fn)
print(f"Recall: {round(recall, 3)}")
fpr = fp / (fp + tn)
print(f"FPR: {round(fpr, 3)}")
# +
# Precision
lower, upper = get_asymptotic_normal_approximation(precision, (tp+fp))
print(f"Manual: lower = {round(lower,3)}, upper = {round(upper, 3)}")
lower, upper = proportion_confint(count=tp, nobs=(tp+fp), alpha=0.05, method="normal")
print(f"statsmodels: lower = {round(lower,3)}, upper = {round(upper, 3)}")
# +
# Recall
lower, upper = get_asymptotic_normal_approximation(recall, (tp+fn))
print(f"Manual: lower = {round(lower,3)}, upper = {round(upper, 3)}")
lower, upper = proportion_confint(count=tp, nobs=(tp+fn), alpha=0.05, method="normal")
print(f"statsmodels: lower = {round(lower,3)}, upper = {round(upper, 3)}")
# +
# FPR
lower, upper = get_asymptotic_normal_approximation(fpr, (fp+tn))
print(f"Manual: lower = {round(lower,3)}, upper = {round(upper, 3)}")
lower, upper = proportion_confint(count=fp, nobs=(fp+tn), alpha=0.05, method="normal")
print(f"statsmodels: lower = {round(lower,3)}, upper = {round(upper, 3)}")
| binomial_ci_normal_approx.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.0
# language: julia
# name: julia-1.6
# ---
# ### 3.2.1 Inference with the CBOW model
# In Python (NumPy) a 1-D array and a $1 \times x$ matrix are treated the same, but Julia treats them differently.
# This is because array handling differs between Julia (column-major) and Python (row-major).
# Ideally, an algorithm designed around row-major conventions would be re-implemented in a column-major style; however, to keep the matrix and tensor shapes consistent with the Python version (and because I am still a beginner...), the code below is deliberately written following the row-major design.
#
# 参考:
# > Julia arrays are column major (Fortran ordered) whereas NumPy arrays are row major (C-ordered) by default.
#
# Source: https://docs.julialang.org/en/v1/manual/noteworthy-differences/#Noteworthy-differences-from-Python
include("../common/layers.jl")
# Sample context data
c0 = [1 0 0 0 0 0 0]
c1 = [0 0 1 0 0 0 0]
# Initialize the weights
W_in = randn(7, 3)
W_out = randn(3, 7)
# Create the layers
in_layer0 = MatMul(W_in)
in_layer1 = MatMul(W_in)
out_layer = MatMul(W_out)
# Forward pass
h0 = forward!(in_layer0, c0)
h1 = forward!(in_layer1, c1)
h = 0.5 .* (h0 + h1)
s = forward!(out_layer, h)
| ch03/cbow_predict.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Numeric data or ... ?
#
# In this exercise, and throughout this chapter, you'll be working with bicycle ride sharing data in San Francisco called `ride_sharing`. It contains information on the start and end stations, the trip duration, and some user information for a bike sharing service.
#
# The `user_type` column contains information on whether a user is taking a free ride and takes on the following values:
#
# - `1` for free riders.
# - `2` for pay per ride.
# - `3` for monthly subscribers.
#
# In this instance, you will print the information of `ride_sharing` using `.info()` and see a firsthand example of how an incorrect data type can flaw your analysis of the dataset.
#
# Instructions
#
# - Print the information of `ride_sharing`.
# - Use `.describe()` to print the summary statistics of the `user_type` column from `ride_sharing`.
# - By looking at the summary statistics - they don't really seem to offer much description on how users are distributed along their purchase type, why do you think that is?
# - Convert `user_type` into categorical by assigning it the `'category'` data type and store it in the `user_type_cat` column.
# - Make sure you converted `user_type_cat` correctly by using an `assert` statement.
# +
# Import packages
import pandas as pd
# Import dataframe
ride_sharing = pd.read_csv('ride_sharing.csv', index_col=0)
# Print the information of ride_sharing
print(ride_sharing.info(), '\n')
# Print summary statistics of user_type column
print(ride_sharing['user_type'].describe())
# +
# Convert user_type from integer to category
ride_sharing['user_type_cat'] = ride_sharing['user_type'].astype('category')
# Write an assert statement confirming the change
assert ride_sharing['user_type_cat'].dtype == 'category'
# Print new summary statistics
print(ride_sharing.info())
# -
# ## Summing strings and concatenating numbers
#
# In the previous exercise, you were able to identify that `category` is the correct data type for `user_type` and convert it in order to extract relevant statistical summaries that shed light on the distribution of `user_type`.
#
# Another common data type problem is importing what should be numerical values as strings, as mathematical operations such as summing and multiplication lead to string concatenation, not numerical outputs.
#
# In this exercise, you'll be converting the string column `duration` to the type `int`. Before that however, you will need to make sure to strip `"minutes"` from the column in order to make sure pandas reads it as numerical. The pandas package has been imported as pd.
#
# Instructions
#
# - Use the `.strip()` method to strip `duration` of `"minutes"` and store it in the `duration_trim` column.
# - Convert `duration_trim` to `int` and store it in the `duration_time` column.
# - Write an `assert` statement that checks if `duration_time`'s data type is now an `int`.
# - Print the average ride duration.
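# A tiny, self-contained illustration of the concatenation pitfall described above (the values are made up):
print('2' + '3')             # strings: concatenation -> '23'
print(int('2') + int('3'))   # integers: numeric sum -> 5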
# +
# Strip duration of minutes
ride_sharing['duration_trim'] = ride_sharing['duration'].str.strip('minutes')
# Convert duration to integer
ride_sharing['duration_time'] = ride_sharing['duration_trim'].astype('int')
# Write an assert statement making sure of conversion
assert ride_sharing['duration_time'].dtype == 'int'
# Print formed columns and calculate average ride duration
print(ride_sharing[['duration', 'duration_trim', 'duration_time']])
print(ride_sharing['duration_time'].mean())
# -
# ## Tire size constraints
#
# In this lesson, you're going to build on top of the work you've been doing with the `ride_sharing` DataFrame. You'll be working with the `tire_sizes` column which contains data on each bike's tire size.
#
# Bicycle tire sizes could be either 26″, 27″ or 29″ and are here correctly stored as a categorical value. In an effort to cut maintenance costs, the ride sharing provider decided to set the maximum tire size to be 27″.
#
# In this exercise, you will make sure the `tire_sizes` column has the correct range by first converting it to an integer, then setting and testing the new upper limit of 27″ for tire sizes.
#
# Instructions
#
# - Convert the `tire_sizes` column from `category` to `'int'`.
# - Use `.loc[]` to set all values of tire_sizes above 27 to 27.
# - Reconvert back `tire_sizes` to `'category'` from int.
# - Print the description of the `tire_sizes`.
# +
# Convert tire_sizes to integer
ride_sharing['tire_sizes'] = ride_sharing['tire_sizes'].astype('int')
# Set all values above 27 to 27
ride_sharing.loc[ride_sharing['tire_sizes'] > 27, 'tire_sizes'] = 27
# Reconvert tire_sizes back to categorical
ride_sharing['tire_sizes'] = ride_sharing['tire_sizes'].astype('category')
# Print tire size description
print(ride_sharing['tire_sizes'].describe())
# -
# ## Back to the future
#
# The data pipeline feeding into the `ride_sharing` DataFrame has been updated to register each ride's date. This information is stored in the `ride_date` column of the type `object`, which represents strings in `pandas`.
#
# A bug was discovered which was relaying rides taken today as taken next year. To fix this, you will find all instances of the `ride_date` column that occur anytime in the future, and set the maximum possible value of this column to today's date. Before doing so, you would need to convert `ride_date` to a `datetime` object.
#
# The `datetime` package has been imported as `dt`, alongside all the packages you've been using till now.
#
# Instructions
#
# - Convert `ride_date` to a `datetime` object and store it in `ride_dt` column using `to_datetime()`.
# - Create the variable `today`, which stores today's date by using the `dt.date.today()` function.
# - For all instances of `ride_dt` in the future, set them to today's date.
# - Print the maximum date in the `ride_dt` column.
# +
# Import datetime
import datetime as dt
# Convert ride_date to datetime
ride_sharing['ride_dt'] = pd.to_datetime(ride_sharing['ride_date'])
# Save today's date
today = dt.date.today()
# Set all rides in the future to today's date
# (the original commented-out comparison failed because ride_dt is a datetime64 column while today is a plain date;
#  comparing on the .dt.date accessor keeps the two types consistent)
ride_sharing.loc[ride_sharing['ride_dt'].dt.date > today, 'ride_dt'] = pd.Timestamp(today)
# Print maximum of ride_dt column
print(ride_sharing['ride_dt'].max())
# -
# ## How big is your subset?
#
# You have the following `loans` DataFrame which contains loan and credit score data for consumers, and some metadata such as their first and last names. You want to find both complete and incomplete duplicates using `.duplicated()`.
#
# ```
# first_name last_name credit_score has_loan
# ---------------------------------------------------------------
# Justin Saddlemeyer 600 1
# Hadrien Lacroix 450 0
# ```
#
# Choose the **correct** usage of `.duplicated()` below:
# `loans.duplicated(subset = ['first_name', 'last_name'], keep = False)` because subsetting on consumer metadata and not discarding any duplicate returns all duplicated rows.
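# A quick illustration of why this is the correct call. The tiny `loans`-style frame below is made up
# for demonstration only; it shows that `keep=False` flags *every* member of a duplicate group instead
# of keeping just the first or last occurrence.
# +
import pandas as pd

loans_demo = pd.DataFrame({
    'first_name': ['Justin', 'Hadrien', 'Justin'],
    'last_name': ['Saddlemeyer', 'Lacroix', 'Saddlemeyer'],
    'credit_score': [600, 450, 610],
    'has_loan': [1, 0, 1]
})

# Both "Justin Saddlemeyer" rows are returned, even though their credit scores differ
duplicate_mask = loans_demo.duplicated(subset=['first_name', 'last_name'], keep=False)
print(loans_demo[duplicate_mask])
# -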
# ## Finding duplicates
#
# A new update to the data pipeline feeding into `ride_sharing` has added the `ride_id` column, which represents a unique identifier for each ride.
#
# The update however coincided with radically shorter average ride duration times and irregular user birth dates set in the future. Most importantly, the number of rides taken has increased by 20% overnight, leading you to think there might be both complete and incomplete duplicates in the `ride_sharing` DataFrame.
#
# In this exercise, you will confirm this suspicion by finding those duplicates. A sample of `ride_sharing` is in your environment, as well as all the packages you've been working with thus far.
#
# Instructions
#
# - Find duplicated rows of `ride_id` in the `ride_sharing` DataFrame while setting `keep` to `False`.
# - Subset `ride_sharing` on `duplicates` and sort by `ride_id` and assign the results to `duplicated_rides`.
# - Print the `ride_id`, `duration` and `user_birth_year` columns of `duplicated_rides` in that order.
# +
# Find duplicates
duplicates = ride_sharing.duplicated(subset='ride_id', keep=False)
# Sort your duplicated rides
duplicated_rides = ride_sharing[duplicates].sort_values('ride_id')
# Print relevant columns
print(duplicated_rides[['ride_id','duration','user_birth_year']])
# -
# ## Treating duplicates
#
# In the last exercise, you were able to verify that the new update feeding into `ride_sharing` contains a bug generating both complete and incomplete duplicated rows for some values of the `ride_id` column, with occasional discrepant values for the `user_birth_year` and `duration` columns.
#
# In this exercise, you will be treating those duplicated rows by first dropping complete duplicates, and then merging the incomplete duplicate rows into one while keeping the average `duration`, and the minimum `user_birth_year` for each set of incomplete duplicate rows.
#
# Instructions
#
# - Drop complete duplicates in `ride_sharing` and store the results in `ride_dup`.
# - Create the statistics dictionary which holds **minimum** aggregation for `user_birth_year` and **mean** aggregation for `duration`.
# - Drop incomplete duplicates by grouping by `ride_id` and applying the aggregation in `statistics`.
# - Find duplicates again and run the `assert` statement to verify de-duplication.
# +
# Drop complete duplicates from ride_sharing
ride_dup = ride_sharing.drop_duplicates()
# Create statistics dictionary for aggregation function
statistics = {'user_birth_year': 'min', 'duration': 'mean'}
# Group by ride_id and compute new statistics
ride_unique = ride_dup.groupby('ride_id').agg(statistics).reset_index()
# Find duplicated values again
duplicates = ride_unique.duplicated(subset='ride_id', keep=False)
duplicated_rides = ride_unique[duplicates == True]
# Assert duplicates are processed
assert duplicated_rides.shape[0] == 0
| cleaning_data_in_python/1_common_data_problems.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# cd /Users/cfe/Dev/fastapi-nosql/
from requests_html import HTML
from slugify import slugify
import pprint
import re
from app import scraper
url = "https://www.amazon.com/Vitamix-E310-Explorian-Professional-Grade-Container/dp/B0758JHZM3/ref=sr_1_3?dchild=1&keywords=vitamix&qid=1632337952&sr=8-3&th=1"
s = scraper.Scraper(url=url, endless_scroll=True)
html_str = s.get()
html_obj = HTML(html=html_str)
def extract_element_text(html_obj, element_id):
el = html_obj.find(element_id, first=True)
if not el:
return ''
return el.text
price_str = extract_element_text(html_obj, '#priceblock_ourprice')
price_str
title_str = extract_element_text(html_obj, "#productTitle")
title_str
def extract_tables(html_obj):
return html_obj.find("table")
tables = extract_tables(html_obj)
tables
def extract_price_from_string(value: str, regex=r"[\$]{1}[\d,]+\.?\d{0,2}"):
x = re.findall(regex, value)
val = None
if len(x) == 1:
val = x[0]
return val
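# A quick sanity check of the price helper above; the input string below is made up purely for illustration.
print(extract_price_from_string("List Price: $1,299.99"))  # expected: $1,299.99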
def extract_dataset(tables):
dataset = {}
for table in tables:
for tbody in table.element.getchildren():
for tr in tbody.getchildren():
row = []
for col in tr.getchildren():
content = ""
try:
content = col.text_content()
except:
pass
if content != "":
_content = content.strip()
row.append(_content)
if len(row) != 2:
continue
key = row[0]
value = row[1]
# print(key, value)
data = {}
key = slugify(key)
if key in dataset:
continue
else:
if "$" in value:
new_key = key
old_key = f'{key}_raw'
new_value = extract_price_from_string(value)
old_value = value
dataset[new_key] = new_value
dataset[old_key] = old_value
else:
dataset[key] = value
return dataset
dataset = extract_dataset(tables)
pprint.pprint(dataset)
| nbs/Scrape with Selenium & Parse with Reqeusts-HTML.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Building two simple polynomial regression model
# We generate polynomial regressions for two data sets and compare the R2 scores to linear regressions.
# ## Case 1: Profit prediction for an agricultural problem
#
# In the following we would like to predict profits on harvest for certain field sizes.
# +
#importing the necessary packages
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
# -
#define data frame
df = pd.read_csv("fields.csv")
df.head()
# We start off with a simple linear regression:
# +
#define the variables
X = df[["width", "length"]].values
Y = df[["profit"]].values
#split the data set into training and test set
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state = 0, test_size = 0.25)
#train the model
model = LinearRegression()
model.fit(X_train, Y_train)
#report the R2 score
print(model.score(X_test, Y_test))
# -
# In a next step we proceed with an attempt at polynomial fitting for the data:
# +
# #PolynomialFeatures?
pf = PolynomialFeatures(degree = 2, include_bias = False) #bias term not needed here
#need to fit the training data accordingly (demanded by sklearn) to adapt to polynomial fitting
pf.fit(X_train)
#generate new columns
X_train_transformed = pf.transform(X_train)
X_test_transformed = pf.transform(X_test)
#print all possible arrangements to get to a polynomial of degree 2 (as done by the transform method)
#print(pf.powers_)
model = LinearRegression()
model.fit(X_train_transformed, Y_train)
print(model.score(X_test_transformed, Y_test))
# -
# We may redo the analysis without the random_state option in the train test function:
# +
scores = []
intercepts = []
coefs = []
for i in range(0,1000):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.25)
X_train_transformed = pf.transform(X_train)
X_test_transformed = pf.transform(X_test)
model = LinearRegression()
model.fit(X_train_transformed, Y_train)
intercepts.append(model.intercept_)
coefs.append(model.coef_)
scores.append(model.score(X_test_transformed, Y_test))
print("Average score: " + str(sum(scores)/ len(scores)))
# +
#np.array(coefs).shape
# -
# Now we would like to filter out columns from the fitting procedure:
# +
scores = []
intercepts = []
coefs = []
for i in range(0,1000):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.25)
#we may exclude certain columns from the fitting and check if the score improves
X_train_transformed = pf.transform(X_train)[:, [0, 1, 2, 3, 4]]
X_test_transformed = pf.transform(X_test)[:, [0, 1, 2, 3, 4]]
model = LinearRegression()
model.fit(X_train_transformed, Y_train)
intercepts.append(model.intercept_)
coefs.append(model.coef_)
scores.append(model.score(X_test_transformed, Y_test))
print("Average score: " + str(sum(scores)/ len(scores)))
# -
# ## Case 2: Diamond price prediction
#
# In the following we would like to model the prices of diamonds via linear and polynomial regressions and compare the quality of the results via the R2 score.
#define data frame
df = pd.read_csv("diamonds.csv")
df.head()
# We start off with two simple linear regressions to get a feeling for the system:
# +
# price over carat
#define the variables
X = df[["carat"]].values
Y = df[["price"]].values
#split the data set into training and test set
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state = 0, test_size = 0.25)
#train the model
model = LinearRegression()
model.fit(X_train, Y_train)
#report the R2 score
print(model.score(X_test, Y_test))
# +
# price over dimensions x,y,z
#define the variables
X = df[["x","y","z"]].values
Y = df[["price"]].values
#split the data set into training and test set
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state = 0, test_size = 0.25)
#train the model
model = LinearRegression()
model.fit(X_train, Y_train)
#report the R2 score
print(model.score(X_test, Y_test))
# -
# Comment: Given this, carat seems to be a good indicator for gauging the price of a diamond.
# Now we try a polynomial regression:
# +
# price over dimensions x,y,z
#define the variables
X = df[["x", "y", "z"]].values
Y = df[["price"]].values
pf = PolynomialFeatures(degree = 2, include_bias = False) #bias term not needed here
#need to fit the training data accordingly (demanded by sklearn) to adapt to polynomial fitting
pf.fit(X_train)
scores = []
intercepts = []
coefs = []
for i in range(0,200):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.25)
#we may exclude certain columns from the fitting and check if the score improves
X_train_transformed = pf.transform(X_train)
X_test_transformed = pf.transform(X_test)
model = LinearRegression()
model.fit(X_train_transformed, Y_train)
intercepts.append(model.intercept_)
coefs.append(model.coef_)
scores.append(model.score(X_test_transformed, Y_test))
print("Average score: " + str(sum(scores)/ len(scores)))
# -
# This result indicates that, in the current setting, the linear regression based on the dimensions outperforms the polynomial one, while the regressions based on carat outperform those based on the dimensions.
| simplepolynomialregressions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # **Fellowship.ai Challenge**
# * **Name**: <NAME>
# * **Email** : <EMAIL>
# * **Region**: ASIA - IST
# ## Topic Chosen - ULMFiT Sentiment : Apply a supervised or semi-supervised ULMFiT model to Twitter US Airlines Sentiment
# * Environment Used for notebook - Kaggle Kernels
# * Dataset Used : twitter-airline-sentiment on kaggle (https://www.kaggle.com/crowdflower/twitter-airline-sentiment#Tweets.csv)
# * Method used for sentiment analysis: ULMFit method (making use of RNNs and LSTMs)
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# -
# ## **Importing dependencies**
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
#utilities for data processing and algebra
import numpy as np
import pandas as pd
#for specialized container datatypes
import collections
#for plotting and data visualization
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
#for feature preprocessing
import re
#sklearn for machine learning
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.utils.multiclass import unique_labels
#extension to reload modules before executing user code
# %reload_ext autoreload
# %autoreload 2
#importing os to analyse and organize the directory structure
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# -
# ## **Importing fastai for Natural Language Processing and ULMFiT**
from fastai import *
from fastai.text import *
# ## **Importing utilities for collecting/checking fastai user environment**
import fastai.utils.collect_env
fastai.utils.collect_env.show_install()
# # **Data Representation and pre-processing**
path = Path('../input/twitter-airline-sentiment/')
file_name = 'Tweets.csv'
path.ls()
# ## **Shuffling & reading the data into a pandas dataframe**
file_path = path / file_name
df_tweets = pd.read_csv(file_path)
df_tweets = df_tweets.reindex(np.random.permutation(df_tweets.index))
df_tweets.head()
# ## **Analysing features and plotting relevant ones**
df_tweets.info()
df_tweets.describe()
# We see that the two most relevant features for us to perform sentiment analysis would be the columns 'airline_sentiment' and the 'text' that is contained in the tweet.
# **Plotting the different sentiments wrt the types and the airline towards which these sentiments were directed.**
df_tweets['airline_sentiment'].value_counts().plot(kind='bar')
# From the above graph, we can clearly see that most of the tweets in the given dataset are negative, followed by neutral. Positive tweets are relatively few in number.
df_tweets['airline'].value_counts().plot(kind='bar')
df_tweets.groupby(['airline', 'airline_sentiment']).size().unstack().plot(kind='bar', stacked=True)
# **From the above two graphs we can see that there is a clear correlation between the degree of passenger satisfaction and the specific airline they were flying with. Hence, during feature engineering, we will have to filter out the airline name from our dataset so that our model doesn't learn it as a parameter and use it as a basis for prediction.**
# **Analysing the sentiment on the basis of the length of the tweet**
df_tweets['tweet_length'] = df_tweets['text'].apply(len)
df_tweets.groupby(['tweet_length', 'airline_sentiment']).size().unstack().plot(kind='line', stacked=False)
# We see that there isn't a lot of correlation between the tweet length and the sentiment behind the tweet. The only conclusion that can safely be drawn from the above graph is that the distribution is heavily skewed in the case of negative tweets, as the more dissatisfied the passenger, the more they have to say.
# Let's check if any such conclusion can be drawn when it comes to average and median sentiment confidence as well.
df_tweets[['tweet_length', 'airline_sentiment', 'airline_sentiment_confidence']].groupby(['tweet_length', 'airline_sentiment']).mean().unstack().plot(kind='line', stacked=False)
plt.title('Average Airline Sentiment vs tweet length')
df_tweets[['tweet_length', 'airline_sentiment', 'airline_sentiment_confidence']].groupby(['tweet_length', 'airline_sentiment']).median().unstack().plot(kind='line', stacked=False)
plt.title('Median Airline Sentiment vs tweet length')
# As no distinguishable and concrete relationship can be inferred from the confidence and tweet length, we will form a dataframe considering the text and airline_sentiment attribute.
features = ['airline_sentiment','text']
df_needed = df_tweets[features]
pd.set_option('display.max_colwidth', 0)
df_needed.head()
df_needed.info()
df_needed.describe()
# **Checking for missing values in our data**
df_needed.isna().sum()
# +
#df_needed['airline_sentiment'].count()
# -
df_needed['airline_sentiment'].value_counts()
sns.set(style = "whitegrid")
sns.set(rc={'figure.figsize': (12,10)})
sns.countplot(x="airline", hue = 'airline_sentiment', data=df_tweets)
plt.title("Airline Sentiment for each US Airline")
# ### As customer sentiment is somewhat specific to the airline being flown, we will have to remove the names of the airlines so that our predictive model is not influenced by this high correlation.
reg = r"@(VirginAmerica|SouthwestAir|united|AmericanAir|Delta|USAirways)"
# Writing a function to replace the airline name
def text_filter(text):
return re.sub(reg,'@airline',text, flags = re.IGNORECASE)
df_needed['text'] = df_needed['text'].apply(text_filter)
df_needed.head(15)
# We have successfully removed the airlines names from our dataframe.
# Now we convert the obtained dataframe into a databunch, as required by our training method.
# # ULMFiT training method
# **Splitting the dataset into train and test sets, initializing hyperparameters and converting it into a Databunch**
train, test = train_test_split(df_needed, test_size =0.1)
moms = (0.8,0.7)
wd = 0.1
# ## *Tokenization*
#
# We read in the data and add new words to our dictionary, as well as create a representation of words using numbers.
data = TextLMDataBunch.from_df(path, train_df = train, valid_df = test)
data.show_batch()
# **As we can see from the above dataframe, the following changes have been implemented for the ease of data modeling and model training:**
# * splitting on spaces
# * splitting on punctuation marks
# * several special tokens have been used to replace the unknown tokens.
# * Contractions like "wouldn't" have been separated.
# Representation of unknown tokens can further be understood by looking at the below list for the entries beginning with symbols ('xx').
data.vocab.itos[:10]
# Text representation post pre-processing
print(data.train_ds[0][0])
print(data.train_ds[2][0])
print(data.train_ds[4][0])
# Numerical representation post pre-processing
print(data.train_ds[0][0].data[:10])
print(data.train_ds[2][0].data[:10])
print(data.train_ds[4][0].data[:10])
# # Language Model
# +
#learn = language_model_learner(data, AWD_LSTM, drop_mult=0.3)
# -
# Defining the language model and setting the learning rates.
learn = language_model_learner(data, AWD_LSTM, drop_mult=0.5, model_dir='/tmp/models')
learn.freeze()
learn.lr_find()
learn.recorder.plot()
learn.recorder.plot(skip_end=15)
# Now, we fit the model for a few cycles by running 1 epoch and then unfreezing and running subsequent epochs to fine tune the same.
moms = (0.8,0.7)
wd = 0.1
lr = 1.0E-02
learn.fit_one_cycle(1, lr, moms=moms, wd=wd)
learn.unfreeze()
learn.fit_one_cycle(10,lr,moms = moms, wd=wd)
# After training the model for 10 epochs, we see that the accuracy of our model is very low; let's check the prediction on a custom input.
learn.predict("This airline is so bad! Never flying with them again!", n_words = 40)
# **The language model doesn't perform too well, for a variety of reasons:**
# * Tweets normally have no fixed format and include informal, often grammatically incorrect structures.
# * Words in tweets often don't follow correct spellings and generic language conventions.
# * As seen above, figures of speech like 'sarcasm' are difficult to account for.
learn.save_encoder('fine_tuned_enc')
# ## **Fine tuning our classifier**
#
# Now we will repeat the process of creating and training a databunch, then train our LSTM language model all over again, but this time to predict the sentiments of the tweets and not just the word/words that follow.
# This model architecture offers a lot of advantages over its traditional language counterpart, which uses the conventional bag-of-words representation for predictions.
# **Splitting into test, train and validation sets**
train_valid, test = train_test_split(df_needed, test_size = 0.1)
train, valid = train_test_split(train_valid, test_size = 0.1)
data_classifier = TextClasDataBunch.from_df(path, train_df = train, valid_df = valid, test_df = test, vocab = data.train_ds.vocab, text_cols = 'text', label_cols = 'airline_sentiment', bs = 24)
data_classifier.show_batch()
learn = text_classifier_learner(data_classifier, AWD_LSTM, drop_mult = 0.5, model_dir = '/tmp/models')
learn.load_encoder('fine_tuned_enc')
learn.freeze()
learn.lr_find()
learn.recorder.plot()
learn.recorder.plot(skip_end = 15)
# **We train by gradually unfreezing layers and training our model one epoch at a time, in accordance with the suggestions in the fast.ai ULMFiT paper.**
lr = 1.0E-03
learn.fit_one_cycle(1,lr,moms=moms, wd=wd)
learn.save('first')
#learn.load('first')
learn.freeze_to(-2)
lr/=2
learn.fit_one_cycle(1,slice(lr/(2.6*4), lr), moms=moms, wd=wd)
learn.save('second')
#learn.load('second')
learn.freeze_to(-3)
lr /=2
learn.fit_one_cycle(1,slice(lr/(2.6*4), lr), moms=moms, wd=wd)
learn.unfreeze()
lr /=5
learn.fit_one_cycle(3, slice(lr/(2.6**4),lr), moms=moms, wd=wd)
# Our model has now been trained; let's try to get a prediction for the same custom input we gave earlier.
learn.predict("This airline is so bad! Never flying with them again!")
# We see that this model performs far better than our conventional language model, and hence we can now evaluate its performance on the test set.
# ### Evaluate performance on test set.
print(test)
test['airline_sentiment'].value_counts().plot(kind='bar')
test['airline_sentiment'].value_counts()
# **Predicting the sentiments of tweets in the test set**
test['predicted']=test['text'].apply(lambda row: str(learn.predict(row)[0]))
print(test)
test.info()
# ## **Evaluation**
#
# Evaluation metric used - Accuracy
# As the predicted sentiments would be either positive, negative or neutral, we can easily compare the predictions to the labels provided in the test set and check the number of times our model makes the correct prediction.
print("Test Accuracy: ", accuracy_score(test['airline_sentiment'], test['predicted']))
# **We see that our model gives us an accuracy of over 80% on the test set, which is quite good, considering the kind of data set and its anomalies.**
test.head(25)
# **Now we plot the confusion matrix to see what the areas of misclassification are and the reason for their existence.**
def plot_matrix(y_true, y_pred, classes, normalize = False, title = None, cmap=plt.cm.Blues):
"""
This function plots the confusion matrix.
Normalization can be applied by setting the Normalize parameter
"""
if not title:
if normalize:
title = "Normalized Confusion Matrix"
else:
title='Not Normalized Confusion Matrix'
confusion = confusion_matrix(y_true, y_pred)
fig,ax = plt.subplots()
im = ax.imshow(confusion, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
#display all ticks
ax.set(xticks = np.arange(confusion.shape[1]),
yticks = np.arange(confusion.shape[0]),
xticklabels = classes, yticklabels = classes,
title = title,
ylabel = 'True Label',
xlabel = 'Predicted Label')
# display the count annotations by looping
dtype = '.2f' if normalize else 'd'
thresh = confusion.max() / 2.
for i in range(confusion.shape[0]):
for j in range(confusion.shape[1]):
ax.text(j,i, format(confusion[i,j],dtype),
ha = 'center',va = 'center',
color = 'white' if confusion[i,j]> thresh else 'black')
fig.tight_layout()
return ax
plot_matrix(test['airline_sentiment'], test['predicted'], classes=['negative','neutral','positive'], title = 'Airline Tweets Sentiment Analysis Confusion Matrix')
plt.show()
# **True negatives** have a very high prediction accuracy.
# The model has a tough time classifying neutral tweets, often branding them as negative despite having an overall accuracy of over 80%.
#
# But in our dataset, some of the tweets carry negative sentiment aimed at other twitter users instead of the airline. In addition, there are other things to consider with informal writing, such as sarcasm and improper grammatical structures, that might end up confusing our model, leading to inaccurate predictions.
#
# Moreover some of the negative tweets have been mislabelled in the dataset as neutral and vice versa, which can be another factor hampering the accuracy of our model.
#
# The overall accuracy is good for true positives, but a vast majority of positive tweets get classified as negative. Let's look into them in more detail.
test.loc[(test['airline_sentiment']=='positive') & (test['predicted']=='negative')]
# It cannot be immediately pointed out what the common thread was amidst all the above tweets that led to their misclassification, but possible causes could be grammatical errors, misspellings and inaccurate information derived from hashtags.
# ### Conclusion and scope of improvement
#
# In the future it could be a good idea to spend more time on feature engineering to account for improper grammatical structures and misspellings while fitting the language model, so that the resulting model is better suited to the vocabulary of the language used in informal writing like tweets.
# Even as humans, it is sometimes hard to judge the sentiment of a given tokenized piece of text and hence our model does a good job in predicting the labels with about 82% accuracy.
# One possible way of handling this issue could be to make use of a much larger corpus with maximum possible training examples that are likely to be encountered in tweets when developing the initial language model.
#
# **Thank you**
#
# *****************************************
| Sentiment Analysis of Airline Tweets/US Airlines Sentiment Analysis (ULMFIT Method).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dense, Flatten, BatchNormalization, Conv2D, MaxPool2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import categorical_crossentropy
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix
import itertools
import os
import shutil
import random
import glob
import matplotlib.pyplot as plt
import warnings
warnings.simplefilter(action = 'ignore', category = FutureWarning)
# %matplotlib inline
# ## Data Preparation
pwd # In order to see current directory of jupyter notebook
# +
# Organize data into train, valid, test dirs
os.chdir('dogs-vs-cats')
if os.path.isdir('train/dog') is False:
os.makedirs('train/dog')
os.makedirs('train/cat')
os.makedirs('valid/dog')
os.makedirs('valid/cat')
os.makedirs('test/dog')
os.makedirs('test/cat')
for c in random.sample(glob.glob('cat*'), 500):
shutil.move(c, 'train/cat')
for c in random.sample(glob.glob('dog*'), 500):
shutil.move(c, 'train/dog')
for c in random.sample(glob.glob('cat*'), 100):
shutil.move(c, 'valid/cat')
for c in random.sample(glob.glob('dog*'), 100):
shutil.move(c, 'valid/dog')
for c in random.sample(glob.glob('cat*'), 50):
shutil.move(c, 'test/cat')
for c in random.sample(glob.glob('dog*'), 50):
shutil.move(c, 'test/dog')
os.chdir('../../')
# -
train_path = 'Muiz Alvi/dogs-vs-cats/train/'
valid_path = 'Muiz Alvi/dogs-vs-cats/valid/'
test_path = 'Muiz Alvi/dogs-vs-cats/test/'
train_batches = ImageDataGenerator(preprocessing_function = tf.keras.applications.vgg16.preprocess_input) \
.flow_from_directory(directory = train_path, target_size = (244, 244), classes = ['cat', 'dog'], batch_size = 10)
valid_batches = ImageDataGenerator(preprocessing_function = tf.keras.applications.vgg16.preprocess_input) \
.flow_from_directory(directory = valid_path, target_size = (244, 244), classes = ['cat', 'dog'], batch_size = 10)
test_batches = ImageDataGenerator(preprocessing_function = tf.keras.applications.vgg16.preprocess_input) \
.flow_from_directory(directory = test_path, target_size = (244, 244), classes = ['cat', 'dog'], batch_size = 10, shuffle = False)
assert train_batches.n == 1000
assert valid_batches.n == 200
assert test_batches.n == 100
assert train_batches.num_classes == valid_batches.num_classes == test_batches.num_classes == 2
imgs, labels = next(train_batches)
# function to plot images in a 1 by 10 grid taken directly from Tensor Flow's website. url: https://www.tensorflow.org/tutorials/images/classification#visualize_training_images
def plotImages(images_arr):
fig, axes = plt.subplots(1, 10, figsize=(20,20))
axes = axes.flatten()
for img, ax in zip( images_arr, axes):
ax.imshow(img)
ax.axis('off')
plt.tight_layout()
plt.show()
plotImages(imgs)
print(labels) # [0. 1.] for dog, [1. 0.] for cat
# ## Build and Train CNN
model = Sequential([
Conv2D(filters = 32, kernel_size = (3,3), activation = 'relu', padding = 'same', input_shape = (244,244,3)),
MaxPool2D(pool_size = (2,2), strides = 2),
Conv2D(filters = 64, kernel_size = (3, 3), activation = 'relu', padding = 'same'),
Flatten(),
Dense(units = 2, activation = 'softmax')
])
model.summary()
model.compile(optimizer = Adam(learning_rate = 0.0001), loss='categorical_crossentropy', metrics = ['accuracy'])
model.fit(x = train_batches, validation_data = valid_batches, epochs = 10, verbose = 2)
| Cat and Dog Classifier - Convolution Neural Network .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# # MPG Cars
# ### Introduction:
#
# The following exercise utilizes data from [UC Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Auto+MPG)
#
# ### Step 1. Import the necessary libraries
# ### Step 2. Import the first dataset [cars1](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/05_Merge/Auto_MPG/cars1.csv) and [cars2](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/05_Merge/Auto_MPG/cars2.csv).
# ### Step 3. Assign each to a variable called cars1 and cars2
# ### Step 4. Oops, it seems our first dataset has some unnamed blank columns, fix cars1
# ### Step 5. What is the number of observations in each dataset?
# ### Step 6. Join cars1 and cars2 into a single DataFrame called cars
# ### Step 7. Oops, there is a column missing, called owners. Create a random number Series from 15,000 to 73,000.
# ### Step 8. Add the column owners to cars
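# Below is a minimal, non-authoritative sketch of how the steps above might be completed. It assumes the
# two CSV URLs linked in Step 2 and pandas' usual "Unnamed: N" naming for blank columns; other solutions
# are equally valid.
# +
import numpy as np
import pandas as pd

# Steps 2-3: import the two datasets
cars1 = pd.read_csv("https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/05_Merge/Auto_MPG/cars1.csv")
cars2 = pd.read_csv("https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/05_Merge/Auto_MPG/cars2.csv")

# Step 4: drop the unnamed blank columns from cars1
cars1 = cars1.loc[:, ~cars1.columns.str.contains("^Unnamed")]

# Step 5: number of observations in each dataset
print(cars1.shape[0], cars2.shape[0])

# Step 6: join the two datasets into a single DataFrame
cars = pd.concat([cars1, cars2], ignore_index=True)

# Steps 7-8: create a random owners Series between 15,000 and 73,000 and add it as a column
cars['owners'] = np.random.randint(15000, 73001, size=len(cars))
cars.head()
# -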
| 05_Merge/Auto_MPG/Exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import torch
import torchvision
import torchvision.transforms as transforms
transform = transforms.Compose([
transforms.ToTensor()])
train = torchvision.datasets.MNIST(
root="data/train", train=True, transform=transform, target_transform=None, download=True)
test = torchvision.datasets.MNIST(
root="data/test", train=False, transform=transform, target_transform=None, download=True)
# +
from matplotlib import pyplot as plt
import numpy as np
print(train.data.size())
print(test.data.size())
img = train.data[0].numpy()
plt.imshow(img, cmap='gray')
print('Label:', train.targets[0])
# +
train_data_resized = train.data.numpy()  # from torch tensors to numpy arrays
test_data_resized = test.data.numpy()
train_data_resized = torch.FloatTensor(np.stack((train_data_resized,)*3, axis=1))  # stack to 3 channels (RGB)
test_data_resized = torch.FloatTensor(np.stack((test_data_resized,)*3, axis=1))
print(train_data_resized.size())
# +
import torch.utils.data as data
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
# Image preprocessing
class ImgTransform():
def __init__(self):
self.transform = transforms.Compose([
            transforms.ToTensor(),           # convert to tensor
            transforms.Normalize(mean, std)  # standardize with the given mean/std
])
def __call__(self, img):
return self.transform(img)
# Inherit from the Dataset class
class _3ChannelMnistDataset(data.Dataset):
def __init__(self, img_data, target, transform):
        # reshape to [num samples, height, width, channels]
self.data = img_data.numpy().transpose((0, 2, 3, 1)) /255
self.target = target
        self.img_transform = transform  # instance of the image preprocessing class
def __len__(self):
        # return the number of images
return len(self.data)
def __getitem__(self, index):
        # return the preprocessed (standardized) data
img_transformed = self.img_transform(self.data[index])
return img_transformed, self.target[index]
# +
train_dataset = _3ChannelMnistDataset(train_data_resized, train.targets, transform=ImgTransform())
test_dataset = _3ChannelMnistDataset(test_data_resized, test.targets, transform=ImgTransform())
# Quick test of the dataset
index = 0
print(train_dataset.__getitem__(index)[0].size())
print(train_dataset.__getitem__(index)[1])
print(train_dataset.__getitem__(index)[0][1])  # confirms the values have been standardized
# -
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=100, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=100, shuffle=False)
# +
from torch import nn
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.relu = nn.ReLU()
self.pool = nn.MaxPool2d(3)
self.conv = nn.Conv2d(3, 10, kernel_size=4)
self.fc1 = nn.Linear(640, 300)
self.fc2 = nn.Linear(300, 100)
self.fc3 = nn.Linear(100, 10)
def forward(self, x):
x = self.conv(x)
x = self.relu(x)
x = self.pool(x)
        x = x.view(x.size()[0], -1)  # flatten the matrix into a vector so the linear layers can process it
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.relu(x)
x = self.fc3(x)
return x
model = Model()
print(model)
# +
import tqdm
from torch import optim
# Evaluation (inference) mode
def eval_net(net, data_loader, device="cpu"):  # pass device="cuda" if a GPU is available
    # switch the network to eval mode
    net.eval()
    ys = []      # true labels collected per minibatch
    ypreds = []  # predicted labels collected per minibatch
    for x, y in data_loader:
        # transfer to the device with the .to() method
        x = x.to(device)
        ys.append(y.to(device))
        # predict the class with the highest score
        # (forward pass, no gradients needed)
        with torch.no_grad():
            _, y_pred = net(x).max(1)
        ypreds.append(y_pred)
    # concatenate the per-minibatch labels and predictions into single tensors
    y = torch.cat(ys)
    ypreds = torch.cat(ypreds)
    # accuracy = (number of correct predictions) / (number of samples)
    acc = (y == ypreds).float().sum() / len(y)
    return acc.item()
# Training loop
def train_net(net, train_loader, test_loader,optimizer_cls=optim.Adam,
loss_fn=nn.CrossEntropyLoss(),n_iter=4, device="cpu"):
train_losses = []
train_acc = []
eval_acc = []
optimizer = optimizer_cls(net.parameters())
    for epoch in range(n_iter):  # loop over the epochs (4 by default)
runnig_loss = 0.0
        # switch to training mode
net.train()
n = 0
n_acc = 0
for i, (xx, yy) in tqdm.tqdm(enumerate(train_loader),
total=len(train_loader)):
xx = xx.to(device)
yy = yy.to(device)
output = net(xx)
loss = loss_fn(output, yy)
            optimizer.zero_grad()  # reset the accumulated gradients
            loss.backward()        # backpropagate from the loss (cross-entropy)
optimizer.step()
runnig_loss += loss.item()
n += len(xx)
_, y_pred = output.max(1)
n_acc += (yy == y_pred).float().sum().item()
train_losses.append(runnig_loss/i)
        # prediction accuracy on the training data
train_acc.append(n_acc / n)
        # prediction accuracy on the validation data
eval_acc.append(eval_net(net, test_loader, device))
        # print the results for this epoch
print("epoch:",epoch+1, "train_loss:",train_losses[-1], "train_acc:",train_acc[-1],
"eval_acc:",eval_acc[-1], flush=True)
# -
eval_net(model, test_loader)
train_net(model, train_loader, test_loader)
data = train_dataset.__getitem__(0)[0].reshape(1, 3, 28, 28)  # reshape to a batch of one (note the size expected by the data loader)
print("Label", train_dataset.__getitem__(0)[1].data)
model.eval()
output = model(data)
print(output.size())
output
# Save the model
model.eval()
# sample input size for tracing
example = torch.rand(1, 3, 28, 28)
traced_script_module = torch.jit.trace(model, example)
traced_script_module.save("./CNNModel.pt")
print(model)
| Pytorch_CNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Objective: Learn to do clustering and noise reduction in data using PCA
# +
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import svd
from sklearn.datasets import load_digits
digits = load_digits()
# -
# ## PCA using SVD
def PCA(X, do_mean_centering=False):
if do_mean_centering:
X_mean_centered = np.zeros_like(X)
for col in range(X_mean_centered.shape[1]):
X_col = X[:, col]
X_mean_centered[:, col] = X_col - np.mean(X_col)
X = X_mean_centered
U, S, PT = svd(X, full_matrices=False)
Sigma = np.diag(S)
T = np.dot(U, Sigma)
return T, PT.T, Sigma # Score, Loadings, Variance
def plot_digits(data):
fig, axes = plt.subplots(
4, 10, figsize=(10, 4),
subplot_kw={'xticks':[], 'yticks':[]},
gridspec_kw=dict(hspace=0.1, wspace=0.1)
)
for i, ax in enumerate(axes.flat):
ax.imshow(
data[i].reshape(8, 8),
cmap='binary', interpolation='nearest', clim=(0, 16)
)
# +
# Find out the original dimension of the data
X = digits.data
y = digits.target
print("Shape of X", X.shape)
print("Shape of y", y.shape)
# -
# Visualize the original data
plot_digits(X)
# ### Task 1: Dimensionality reduction: Conduct PCA on the matrix $X$ to find out the dimension required to capture 80% of the variance
# +
# TODO: Make plots comparing normalization to not
# Get variance explained by singular values
def conduct_PCA(X):
n_samples = X.shape[0]
T, P, Sigma = PCA(X)
# Compute sample variance
explained_variance = (Sigma ** 2) / (n_samples - 1)
total_variance = explained_variance.sum()
explained_variance_ratio = explained_variance / total_variance
#cumulative_explained_variance = np.cumsum(explained_variance_ratio)
#explained_variance_threshold = 0.8
#required_dimensions = np.argmax(cumulative_explained_variance > explained_variance_threshold)
component_wise_sum = explained_variance_ratio.sum(axis=0)
cumulative_explained_variance = np.cumsum(component_wise_sum)
explained_variance_threshold = 0.8
required_dimensions = np.argmax(cumulative_explained_variance > explained_variance_threshold)
plt.figure()
plt.plot(cumulative_explained_variance)
plt.grid()
plt.xlabel('Number of components')
plt.ylabel('Cumulative explained variance');
print(f'Required components/dimensions to explain {explained_variance_threshold*100}% variance: {required_dimensions}')
conduct_PCA(X)
conduct_PCA(X / np.linalg.norm(X))
# -
# Using the "eyeballing" method to pinpoint the necessary dimension to capture a given amount of variance is not very precise when the dimensionality of the underlying data is *larger than it is here*. For data sets with eg. 100+ dimensions we could solve for the exact dimension that exceeds the set explained variance, say 80%, as
#
# ```
# cumulative_explained = np.cumsum(explained_variance_ratio)
# n_required_dims = np.argmax(cumulative_explained > 0.8)
# ```
# We can also specify the required amount of explained variance if we use e.g. sklearn's PCA implementation.
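# As a minimal sketch of that shortcut: passing a float between 0 and 1 as `n_components` makes
# scikit-learn keep just enough components to reach that fraction of explained variance. The import is
# aliased so it does not shadow the custom `PCA()` function defined above.
# +
from sklearn.decomposition import PCA as SklearnPCA

sk_pca = SklearnPCA(n_components=0.8).fit(digits.data)
print(sk_pca.n_components_)  # number of components needed for at least 80% explained variance
# -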
# ### Task 2: Clustering: Project the original data matrix X on the first two PCs and draw the score plot
# +
# Need mean centered data here to make more sense out of the plots
# Clustering without mean centering yields more overlap.
# Columns of T = U@S are principle components
T, _, _ = PCA(X, do_mean_centering=True)
t1 = T[:,0]
t2 = T[:,1]
plt.figure(figsize=(15, 10))
plt.scatter(
t1, t2, c=digits.target, edgecolor='none', alpha=0.5,
cmap=plt.cm.get_cmap('Spectral', 10)
)
plt.xlabel('component 1')
plt.ylabel('component 2')
plt.colorbar();
# -
# ### Task 3: Denoising: Remove noise from the noisy data
# +
# Adding noise to the original data
X = digits.data
y = digits.target
np.random.seed(42)
noisy = np.random.normal(X, 4)
plot_digits(noisy)
# -
# Tips:
#
# * Decompose the noisy data using PCA
# * Reconstruct the data using just a few dominant components. For eg. check the variance plot
#
# Since the nature of the noise is more or less similar across all the digits, the noise directions are not features with enough variance to discriminate between the digits.
# +
def denoise_signal(signal, do_mean_centering):
    T, P, Sigma = PCA(signal, do_mean_centering)
    # Select how many components to use
    explained_variance = (Sigma ** 2) / (signal.shape[0] - 1)
    total_variance = explained_variance.sum()
    explained_variance_ratio = explained_variance / total_variance
    component_wise_sum = explained_variance_ratio.sum(axis=0)
    cumulative_explained_variance = np.cumsum(component_wise_sum)
    explained_variance_threshold = 0.8
    required_dimensions = np.argmax(cumulative_explained_variance > explained_variance_threshold)
    print(required_dimensions)
    n_components = required_dimensions
    # Project the data down to n principal components, then reconstruct the data from them.
    noise_reduced = T[:, :n_components]
    noise_reconstructed = noise_reduced @ P[:, :n_components].T
    return noise_reconstructed

plot_digits(denoise_signal(noisy, do_mean_centering=False))
# -
# ### Task 4: Study the impact of normalization of the dataset before conducting PCA. Discuss if it is critical to normalize this particular data compared to the dataset in other notebooks
# +
normalized_signal = noisy / np.linalg.norm(noisy)
plot_digits(denoise_signal(normalized_signal, do_mean_centering=False))
# Normalizing here does not yield any noticeable improvement.
# This is likely because each column in the dataset represents some
# part of the same source (images of the digits), and thus we should expect
# similar properties between the columns, i.e. we are not comparing apples and oranges :)
# See also Task 1 - normalization does not change anything.
# -
# ## All the above exercises can be done using the sklearn library as follows
# +
from sklearn.decomposition import PCA
X = digits.data
y = digits.target
# +
pca = PCA(2) # project from 64 to 2 dimensions
projected = pca.fit_transform(digits.data)
print(digits.data.shape)
print(projected.shape)
plot_digits(digits.data)
# -
plt.figure(figsize=(15,10))
plt.scatter(
projected[:, 0], projected[:, 1],
c=digits.target, edgecolor='none', alpha=0.5,
cmap=plt.cm.get_cmap('Spectral', 10)
)
plt.xlabel('Component 1')
plt.ylabel('Component 2')
plt.colorbar();
# +
pca = PCA().fit(X)
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance');
# +
np.random.seed(42)
noisy = np.random.normal(digits.data, 4)
plot_digits(noisy)
# +
pca = PCA(0.50).fit(noisy) # 50% of the variance amounts to 12 principal components.
pca.n_components_
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance');
# +
components = pca.transform(noisy)
filtered = pca.inverse_transform(components)
plot_digits(filtered)
# -
| assignment_04_PCA_clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9 (tensorflow)
# language: python
# name: tensorflow
# ---
# + [markdown] id="0joqdbKedFtm"
# <a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_07_5_tabular_synthetic.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Or3fhGk9dFtn"
# # T81-558: Applications of Deep Neural Networks
# **Module 7: Generative Adversarial Networks**
# * Instructor: [<NAME>](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
# * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# + [markdown] id="W-QArqZxdFto"
# # Module 7 Material
#
# * Part 7.1: Introduction to GANs for Image and Data Generation [[Video]](https://www.youtube.com/watch?v=hZw-AjbdN5k&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_07_1_gan_intro.ipynb)
# * Part 7.2: Train StyleGAN3 with your Own Images [[Video]](https://www.youtube.com/watch?v=R546LYsQk5M&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_07_2_train_gan.ipynb)
# * Part 7.3: Exploring the StyleGAN Latent Vector [[Video]](https://www.youtube.com/watch?v=goQzp8QSb2s&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_07_3_latent_vector.ipynb)
# * Part 7.4: GANs to Enhance Old Photographs Deoldify [[Video]](https://www.youtube.com/watch?v=0OTd5GlHRx4&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_07_4_deoldify.ipynb)
# * **Part 7.5: GANs for Tabular Synthetic Data Generation** [[Video]](https://www.youtube.com/watch?v=yujdA46HKwA&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_07_5_tabular_synthetic.ipynb)
#
# + [markdown] id="Zn-FViihdN1M"
# # Google CoLab Instructions
#
# The following code ensures that Google CoLab is running the correct version of TensorFlow.
# Running the following code will map your GDrive to ```/content/drive```.
# + colab={"base_uri": "https://localhost:8080/"} id="f7G_GEwHdOrE" outputId="020e24de-efe3-4b95-88aa-03430c473cfa"
try:
from google.colab import drive
COLAB = True
print("Note: using Google CoLab")
# %tensorflow_version 2.x
except:
print("Note: not using Google CoLab")
COLAB = False
# + [markdown] id="aCb4iUtAdFto"
# # Part 7.5: GANs for Tabular Synthetic Data Generation
#
# Typically GANs are used to generate images. However, we can also generate tabular data from a GAN. In this part, we will use the Python tabgan utility to create fake data from tabular data. Specifically, we will use the Auto MPG dataset to train a GAN to generate fake cars. [Cite:ashrapov2020tabular](https://arxiv.org/pdf/2010.00638.pdf)
#
# ## Installing Tabgan
#
# Pytorch is the foundation of the tabgan neural network utility. The following code installs the needed software to run tabgan in Google Colab.
# + colab={"base_uri": "https://localhost:8080/"} id="5-iTPkSWdsGa" outputId="bfd5ee3e-feb9-4a40-c5ad-3540ae4f8350"
# HIDE OUTPUT
CMD = "wget https://raw.githubusercontent.com/Diyago/"\
"GAN-for-tabular-data/master/requirements.txt"
# !{CMD}
# !pip install -r requirements.txt
# !pip install tabgan
# + [markdown] id="HlETatByeGqz"
# Note, after installing; you may see this message:
#
# * You must restart the runtime in order to use newly installed versions.
#
# If so, click the "restart runtime" button just under the message. Then rerun this notebook, and you should not receive further issues.
#
# ## Loading the Auto MPG Data and Training a Neural Network
#
# We will begin by generating fake data for the Auto MPG dataset we have previously seen. The tabgan library can generate categorical (textual) and continuous (numeric) data. However, it cannot generate unstructured data, such as the name of the automobile. Car names, such as "AMC Rebel SST" cannot be replicated by the GAN, because every row has a different car name; it is a textual but non-categorical value.
#
# The following code is similar to what we have seen before. We load the AutoMPG dataset. The tabgan library requires Pandas dataframe to train. Because of this, we keep both the Pandas and Numpy values.
# + colab={"base_uri": "https://localhost:8080/"} id="-YRAjvvMeWuz" outputId="d819599f-8023-434c-fa9a-fd8df6935132"
# HIDE OUTPUT
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.model_selection import train_test_split
import pandas as pd
import io
import os
import requests
import numpy as np
from sklearn import metrics
df = pd.read_csv(
"https://data.heatonresearch.com/data/t81-558/auto-mpg.csv",
na_values=['NA', '?'])
COLS_USED = ['cylinders', 'displacement', 'horsepower', 'weight',
'acceleration', 'year', 'origin','mpg']
COLS_TRAIN = ['cylinders', 'displacement', 'horsepower', 'weight',
'acceleration', 'year', 'origin']
df = df[COLS_USED]
# Handle missing value
df['horsepower'] = df['horsepower'].fillna(df['horsepower'].median())
# Split into training and test sets
df_x_train, df_x_test, df_y_train, df_y_test = train_test_split(
df.drop("mpg", axis=1),
df["mpg"],
test_size=0.20,
#shuffle=False,
random_state=42,
)
# Create dataframe versions for tabular GAN
df_x_test, df_y_test = df_x_test.reset_index(drop=True), \
df_y_test.reset_index(drop=True)
df_y_train = pd.DataFrame(df_y_train)
df_y_test = pd.DataFrame(df_y_test)
# Pandas to Numpy
x_train = df_x_train.values
x_test = df_x_test.values
y_train = df_y_train.values
y_test = df_y_test.values
# Build the neural network
model = Sequential()
# Hidden 1
model.add(Dense(50, input_dim=x_train.shape[1], activation='relu'))
model.add(Dense(25, activation='relu')) # Hidden 2
model.add(Dense(12, activation='relu')) # Hidden 3
model.add(Dense(1)) # Output
model.compile(loss='mean_squared_error', optimizer='adam')
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3,
patience=5, verbose=1, mode='auto',
restore_best_weights=True)
model.fit(x_train,y_train,validation_data=(x_test,y_test),
callbacks=[monitor], verbose=2,epochs=1000)
# + [markdown] id="YeR9CQ5weQDB"
# We now evaluate the trained neural network to see the RMSE. We will use this trained neural network to compare the accuracy between the original data and the GAN-generated data. We will later see that you can use such comparisons for anomaly detection; this technique can also be used in security systems. If a neural network trained on original data does not perform well on new data, then the new data may be suspect or fake.
# + colab={"base_uri": "https://localhost:8080/"} id="WFijxBaufVzr" outputId="1a980286-e40b-4800-becd-cdba31979c8a"
pred = model.predict(x_test)
score = np.sqrt(metrics.mean_squared_error(pred,y_test))
print("Final score (RMSE): {}".format(score))
# + [markdown] id="0k33foL3eTDN"
# ## Training a GAN for Auto MPG
#
# Next, we will train the GAN to generate fake data from the original MPG data. There are quite a few options that you can fine-tune for the GAN. The example presented here uses most of the default values. These are the usual hyperparameters that must be tuned for any model and require some experimentation for optimal results. To learn more about tabgan, refer to its paper or this [Medium article](https://towardsdatascience.com/review-of-gans-for-tabular-data-a30a2199342), written by the creator of tabgan.
# + colab={"base_uri": "https://localhost:8080/", "height": 81, "referenced_widgets": ["4868c1e7b0c943b594bc1ecad46db436", "6ead85f553054e4aa116920a40e49b04", "a9f4fb7eacb94aafbf64a98b5fc0fc37", "3c26c587accb4c26b0b98221b547356f", "39885cd66caa4fe79fc53f2368d7a5c0", "5ecaf538dd5744198cedd271a43a6d0f", "9030dbab18ec43f481bfc088de9447ec", "5dedd3556fd54bf58eef12635398021b", "c993e9cdf47c4c6799405a6d628128b4", "<KEY>", "<KEY>", "d778ac7cdd1e4d18b31a2a85d296a1c6", "3bb0052560414e108e0e966b36739768", "a9ef13d5399a4eb2afee41204ed24c54", "7202f83df3894af7add22a1a074617ed", "<KEY>", "e7a881fa8d964ff2ad0a2137cabce76d", "<KEY>", "<KEY>", "783fede137ea4452a39df668eb12a411", "<KEY>", "a054e62a36cc484e9b1554ed37194876"]} id="L-i4CdwYkgLU" outputId="599c8605-9570-4436-c3d8-393d88aa2f9f"
from tabgan.sampler import GANGenerator
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
gen_x, gen_y = GANGenerator(gen_x_times=1.1, cat_cols=None,
bot_filter_quantile=0.001, top_filter_quantile=0.999, \
is_post_process=True,
adversarial_model_params={
"metrics": "rmse", "max_depth": 2, "max_bin": 100,
"learning_rate": 0.02, "random_state": \
42, "n_estimators": 500,
}, pregeneration_frac=2, only_generated_data=False,\
gan_params = {"batch_size": 500, "patience": 25, \
"epochs" : 500,}).generate_data_pipe(df_x_train, df_y_train,\
df_x_test, deep_copy=True, only_adversarial=False, \
use_adversarial=True)
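# + [markdown]
# If the frame had kept a genuinely categorical column, tabgan could be told about it through `cat_cols`. The sketch below is illustrative only: treating `origin` as a category is an assumption, the remaining parameters fall back to their defaults, and the heavy `generate_data_pipe` call is left commented out to avoid retraining.
# +
gen_cat_example = GANGenerator(gen_x_times=1.1, cat_cols=["origin"],
                               is_post_process=True)
# gen_x_cat, gen_y_cat = gen_cat_example.generate_data_pipe(
#     df_x_train, df_y_train, df_x_test, deep_copy=True,
#     only_adversarial=False, use_adversarial=True)
# -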
# + [markdown] id="qBxYegwNdXdz"
# Note: if you receive an error running the above code, you likely need to restart the runtime. You should have a "restart runtime" button in the output from the second cell. Once you restart the runtime, rerun all of the cells. This step is necessary as tabgan requires specific versions of some packages.
#
# ## Evaluating the GAN Results
#
# If we display the results, we can see that the GAN-generated data looks similar to the original. Some values, typically whole numbers in the original data, have fractional values in the synthetic data.
# + colab={"base_uri": "https://localhost:8080/", "height": 423} id="CzKROV-Pm1SE" outputId="2ddf9726-6074-41e6-82bd-a8512f493c5e"
gen_x
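# + [markdown]
# As a quick check (a minimal sketch, assuming the generated frame keeps the original column names), we can round the columns that are whole numbers in the original data back to integers before comparing:
# +
int_cols = ['cylinders', 'year', 'origin']  # integer-valued in the original Auto MPG data
gen_x_rounded = gen_x.copy()
gen_x_rounded[int_cols] = gen_x_rounded[int_cols].round().astype(int)
gen_x_rounded.head()
# -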
# + [markdown] id="RQ6lc2EHn8i5"
# Finally, we present the synthetic data to the previously trained neural network to see how accurately we can predict the synthetic targets. As we can see, some accuracy is lost (the RMSE increases) when moving to synthetic data.
# + colab={"base_uri": "https://localhost:8080/"} id="BXoMORyHCU0o" outputId="21196542-b7e4-4c72-cd47-5f10ec96533b"
# Predict
pred = model.predict(gen_x.values)
score = np.sqrt(metrics.mean_squared_error(pred,gen_y.values))
print("Final score (RMSE): {}".format(score))
| t81_558_class_07_5_tabular_synthetic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# label_csv = np.loadtxt(open('/content/gdrive/My Drive/Colab/Data/Labels.csv'\
# ,"rb"),delimiter=",",skiprows=0)
label_column = pd.read_csv(r'F:\DATA\Journal_Date/21scenes/feature_generation_data_no_GT_arbitrary_samples_20190509/18_scenes/Labels.csv',
                           header=None, usecols=[0])
label = label_column.values.ravel()
print(label)
print(label.shape)
feature = pd.read_csv(r'F:\DATA\Journal_Date/21scenes/feature_generation_data_no_GT_arbitrary_samples_20190509/18_scenes/Sample Data.csv',
                      header=None, usecols=range(1,173))
# print(feature)
num_scene= 18
# +
import datetime
starttime = datetime.datetime.now()
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import learning_curve, GridSearchCV
from sklearn.model_selection import cross_validate
from sklearn import metrics
import matplotlib.pylab as plt
from sklearn.model_selection import KFold
n_estimators_grid = {'n_estimators':range(50,351,25)}
GS_n_estimator = GridSearchCV(estimator=RandomForestClassifier(min_samples_split=2,
                min_samples_leaf=1, max_depth=10, max_features='sqrt', random_state=0),
                n_jobs=-1, param_grid=n_estimators_grid, scoring='accuracy',
                cv=KFold(n_splits=num_scene))  # KFold's random_state is only meaningful when shuffle=True
GS_n_estimator.fit(feature, label)
print("Best parameters set found on development set:")
print()
print(GS_n_estimator.best_params_)
means = GS_n_estimator.cv_results_['mean_test_score']
stds = GS_n_estimator.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, GS_n_estimator.cv_results_['params']):
print("%0.4f (+/-%0.04f) for %r"
% (mean, std * 2, params))
# GS_n_estimator.cv_results_ , GS_n_estimator.best_params_, GS_n_estimator.best_score_
# RFmodel = RandomForestClassifier(n_estimators = 200, oob_score = "True", n_jobs = -1, max_depth = 10,
# max_features = "auto", min_samples_leaf = 1, min_samples_split = 10)
# RFmodel.fit(feature, label)
# print(RFmodel.oob_score_)
endtime = datetime.datetime.now()
print("runtime: ", (endtime - starttime))
# -
| Feature_selection_using_rbf_kernel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/WillSmoka/FunctionHacker/blob/master/Copy_of_Real_Time_Voice_Cloning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="6yk3PMfBuZhS" colab_type="text"
# Make sure GPU is enabled
# Runtime -> Change Runtime Type -> Hardware Accelerator -> GPU
# + id="qhunyJSod_UT" colab_type="code" outputId="c98af496-a0bf-4db0-c5f1-6a56dca0e767" colab={"base_uri": "https://localhost:8080/", "height": 104}
# Clone git repo
# !git clone https://github.com/CorentinJ/Real-Time-Voice-Cloning.git
# + id="GneTTDCIs8TM" colab_type="code" outputId="db0d3fb9-ece6-4f7d-d870-793dd1318837" colab={"base_uri": "https://localhost:8080/", "height": 35}
# cd Real-Time-Voice-Cloning/
# + id="0AVd9vLKeKm6" colab_type="code" outputId="fe3cab31-ce06-4536-fa2b-a66886d7eb2e" colab={"base_uri": "https://localhost:8080/", "height": 363}
# Install dependencies
# !pip install -q -r requirements.txt
# !apt-get install -qq libportaudio2
# + id="VuwgOQlPeN8a" colab_type="code" outputId="093f66a0-dfe5-4fb6-df16-b1d4fb98853a" colab={"base_uri": "https://localhost:8080/", "height": 311}
# Download dataset
# !gdown https://drive.google.com/uc?id=1n1sPXvT34yXFLT47QZA6FIRGrwMeSsZc
# !unzip pretrained.zip
# + id="53CWMIQ-eZ-L" colab_type="code" colab={}
# Code for recording audio from the browser
from IPython.display import Javascript
from google.colab import output
from base64 import b64decode
import IPython
import uuid
from google.colab import output
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
class InvokeButton(object):
def __init__(self, title, callback):
self._title = title
self._callback = callback
def _repr_html_(self):
from google.colab import output
callback_id = 'button-' + str(uuid.uuid4())
output.register_callback(callback_id, self._callback)
template = """<button id="{callback_id}" style="cursor:pointer;background-color:#EEEEEE;border-color:#E0E0E0;padding:5px 15px;font-size:14px">{title}</button>
<script>
document.querySelector("#{callback_id}").onclick = (e) => {{
google.colab.kernel.invokeFunction('{callback_id}', [], {{}})
e.preventDefault();
}};
</script>"""
html = template.format(title=self._title, callback_id=callback_id)
return html
RECORD = """
const sleep = time => new Promise(resolve => setTimeout(resolve, time))
const b2text = blob => new Promise(resolve => {
const reader = new FileReader()
reader.onloadend = e => resolve(e.srcElement.result)
reader.readAsDataURL(blob)
})
var record = time => new Promise(async resolve => {
stream = await navigator.mediaDevices.getUserMedia({ audio: true })
recorder = new MediaRecorder(stream)
chunks = []
recorder.ondataavailable = e => chunks.push(e.data)
recorder.start()
await sleep(time)
recorder.onstop = async ()=>{
blob = new Blob(chunks)
text = await b2text(blob)
resolve(text)
}
recorder.stop()
})
"""
def record(sec=3):
display(Javascript(RECORD))
s = output.eval_js('record(%d)' % (sec*1000))
b = b64decode(s.split(',')[1])
with open('audio.wav','wb+') as f:
f.write(b)
return 'audio.wav'
# + id="tDvZn-k9t3Eu" colab_type="code" outputId="008ab09e-323b-442f-b55e-fe8d5dc4dff2" colab={"base_uri": "https://localhost:8080/", "height": 104}
from IPython.display import Audio
from IPython.utils import io
from synthesizer.inference import Synthesizer
from encoder import inference as encoder
from vocoder import inference as vocoder
from pathlib import Path
import numpy as np
import librosa
encoder_weights = Path("encoder/saved_models/pretrained.pt")
vocoder_weights = Path("vocoder/saved_models/pretrained/pretrained.pt")
syn_dir = Path("synthesizer/saved_models/logs-pretrained/taco_pretrained")
encoder.load_model(encoder_weights)
synthesizer = Synthesizer(syn_dir)
vocoder.load_model(vocoder_weights)
# + id="PyLdbUfks2lv" colab_type="code" outputId="e27008ed-5469-4552-c0bb-c0c1e5905d42" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 1000}
#@title Deep vocoder
def synth():
text = "rad" #@param {type:"string"}
num = 4 #@param {type:"integer"}
print("Now recording for 10 seconds, say what you will...")
record(10)
print("Audio recording complete")
in_fpath = Path("audio.wav")
reprocessed_wav = encoder.preprocess_wav(in_fpath)
original_wav, sampling_rate = librosa.load(in_fpath)
preprocessed_wav = encoder.preprocess_wav(original_wav, sampling_rate)
embed = encoder.embed_utterance(preprocessed_wav)
print("Synthesizing new audio...")
with io.capture_output() as captured:
specs = synthesizer.synthesize_spectrograms([text], [embed])
generated_wav = vocoder.infer_waveform(specs[0])
generated_wav = np.pad(generated_wav, (0, synthesizer.sample_rate), mode="constant")
display(Audio(generated_wav, rate=synthesizer.sample_rate))
InvokeButton('Start recording', synth)
| Copy_of_Real_Time_Voice_Cloning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ![rmotr](https://user-images.githubusercontent.com/7065401/52071918-bda15380-2562-11e9-828c-7f95297e4a82.png)
# <hr style="margin-bottom: 40px;">
#
# <img src="https://user-images.githubusercontent.com/7065401/68501079-0695df00-023c-11ea-841f-455dac84a089.jpg"
# style="width:400px; float: right; margin: 0 40px 40px 40px;"></img>
#
# # Reading CSV and TXT files
#
# Rather than creating `Series` or `DataFrame` structures from scratch, or even from Python core sequences or `ndarrays`, the most typical use of **pandas** is based on loading information from files or other sources of information for further exploration, transformation and analysis.
#
# In this lecture we'll learn how to read comma-separated values files (.csv) and raw text files (.txt) into pandas `DataFrame`s.
# ![purple-divider](https://user-images.githubusercontent.com/7065401/52071927-c1cd7100-2562-11e9-908a-dde91ba14e59.png)
#
# ## Hands on!
import pandas as pd
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png)
#
# ## Reading data with Python
#
# As we saw in previous courses, we can read data using plain Python.
#
# When you want to work with a file, the first thing to do is to open it. This is done by invoking the `open()` built-in function.
#
# `open()` takes a single required argument, the path to the file, and returns a single value, the file object.
#
# The `with` statement automatically takes care of closing the file once it leaves the `with` block, even in cases of error.
# +
filepath = 'btc-market-price.csv'
with open(filepath, 'r') as reader:
print(reader)
# -
# Once the file is opened, we can read its content as follows:
# +
filepath = 'btc-market-price.csv'
with open(filepath, 'r') as reader:
for index, line in enumerate(reader.readlines()):
# read just the first 10 lines
if (index < 10):
print(index, line)
# -
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png)
#
# ## Reading data with Pandas
#
# Loading data from files is probably one of the most common tasks in data analysis: public data sources, logs, historical information tables, exports from databases. For this, the pandas library offers functions to read and write files in multiple formats such as CSV, JSON, XML and Excel's XLSX, all of them creating a `DataFrame` with the information read from the file.
#
# We'll learn how to read different types of data including:
# - CSV files (.csv)
# - Raw text files (.txt)
# - JSON data from a file and from an API
# - Data from a SQL query over a database
#
# There are many other reading functions available, as the following table shows (a short sketch of the JSON and SQL readers follows below):
#
# ![pandas read data table](https://user-images.githubusercontent.com/7065401/68400151-51d5c200-0156-11ea-9732-aa00400c8e4b.png)
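# As a quick, hedged sketch of two of the readers mentioned above, `read_json` and `read_sql` follow the same pattern as `read_csv`; small in-memory examples are used here so nothing outside this notebook is required.
# +
import io
import sqlite3
# read_json: parse a small JSON document from an in-memory buffer
json_df = pd.read_json(io.StringIO('[{"a": 1, "b": 2}, {"a": 3, "b": 4}]'))
# read_sql: query an in-memory SQLite database
conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE prices (day TEXT, price REAL)')
conn.execute("INSERT INTO prices VALUES ('2017-01-01', 997.75)")
conn.commit()
sql_df = pd.read_sql('SELECT * FROM prices', conn)
json_df, sql_df
# -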
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png)
#
# ## The `read_csv` method
#
# The first method we'll learn is **read_csv**, which lets us read comma-separated values (CSV) files and raw text (TXT) files into a `DataFrame`.
#
# The `read_csv` function is extremely powerful: we can specify a very broad set of parameters at import time that accurately configure how the data will be read and parsed, covering structure, encoding and other details. The most common parameters are as follows:
#
# - `filepath`: Path of the file to be read.
# - `sep`: Character(s) that are used as a field separator in the file.
# - `header`: Index of the row containing the names of the columns (None if none).
# - `index_col`: Index of the column or sequence of indexes that should be used as index of rows of the data.
# - `names`: Sequence containing the names of the columns (used together with header = None).
# - `skiprows`: Number of rows or sequence of row indexes to ignore in the load.
# - `na_values`: Sequence of values that, if found in the file, should be treated as NaN.
# - `dtype`: Dictionary in which the keys will be column names and the values will be types of NumPy to which their content must be converted.
# - `parse_dates`: Flag that indicates if Python should try to parse data with a format similar to dates as dates. You can enter a list of column names that must be joined for the parsing as a date.
# - `date_parser`: Function to use to try to parse dates.
# - `nrows`: Number of rows to read from the beginning of the file.
# - `skipfooter`: Number of rows to ignore at the end of the file.
# - `encoding`: Encoding to be expected from the file read.
# - `squeeze`: Flag that indicates that if the data read only contains one column the result is a Series instead of a DataFrame.
# - `thousands`: Character to use to detect the thousands separator.
# - `decimal`: Character to use to detect the decimal separator.
# - `skip_blank_lines`: Flag that indicates whether blank lines should be ignored.
#
# > Full `read_csv` documentation can be found here: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html.
# In this case we'll try to read our `btc-market-price.csv` CSV file using different parameters to parse it correctly.
#
# This file contains records of the mean price of Bitcoin per date.
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png)
#
# ## Reading our first CSV file
#
# Every time we call the `read_csv` method, we need to pass an explicit `filepath` parameter indicating the path where our CSV file is.
#
# Any valid string path is acceptable. The string could be a URL. Valid URL schemes include HTTP, FTP, S3, and file. For file URLs, a host is expected. A local file could be: `file://localhost/path/to/table.csv`.
#
# For example, we can use the `read_csv` method to load data directly from a URL:
# +
csv_url = "https://raw.githubusercontent.com/datasets/gdp/master/data/gdp.csv"
pd.read_csv(csv_url).head()
# -
# Or just use a local file:
# +
df = pd.read_csv('btc-market-price.csv')
df.head()
# -
# In this case we let pandas infer everything related to our data, but in most cases we'll need to explicitly tell pandas how we want our data to be loaded. To do that we use parameters.
#
# Let's see how these parameters work.
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png)
#
# ## First row behaviour with `header` parameter
#
# The CSV file we're reading has only two columns: `Timestamp` and `Price`. It doesn't have a header. Pandas automatically assigns the first row of data as the header, which is incorrect here. We can overwrite this behavior with the `header` parameter.
df = pd.read_csv('btc-market-price.csv',
header=None)
df.head()
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png)
#
# ## Missing values with `na_values` parameter
#
# We can define a `na_values` parameter with the values we want to be recognized as NA/NaN. In this case empty strings `''`, `?` and `-` will be recognized as null values.
df = pd.read_csv('btc-market-price.csv',
header=None,
na_values=['', '?', '-'])
df.head()
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png)
#
# ## Column names using `names` parameter
#
# We'll add the column names using the `names` parameter.
df = pd.read_csv('btc-market-price.csv',
header=None,
na_values=['', '?', '-'],
names=['Timestamp', 'Price'])
df.head()
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png)
#
# ## Column types using `dtype` parameter
#
#
# Without the `dtype` parameter, pandas will try to figure out the type of each column automatically. We can use the `dtype` parameter to force pandas to use a certain dtype.
#
# In this case we'll force the `Price` column to be `float`.
df = pd.read_csv('btc-market-price.csv',
header=None,
na_values=['', '?', '-'],
names=['Timestamp', 'Price'],
dtype={'Price': 'float'})
df.head()
df.dtypes
# The `Timestamp` column was interpreted as a regular string (`object` in pandas notation); we can parse it manually using a vectorized operation, as we saw in previous courses.
#
# We'll parse `Timestamp` column to `Datetime` objects using `to_datetime` method:
pd.to_datetime(df['Timestamp']).head()
df['Timestamp'] = pd.to_datetime(df['Timestamp'])
df.head()
df.dtypes
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png)
#
# ## Date parser using `parse_dates` parameter
#
# Another way of dealing with `Datetime` objects is using `parse_dates` parameter with the position of the columns with dates.
df = pd.read_csv('btc-market-price.csv',
header=None,
na_values=['', '?', '-'],
names=['Timestamp', 'Price'],
dtype={'Price': 'float'},
parse_dates=[0])
df.head()
df.dtypes
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png)
#
# ## Adding index to our data using `index_col` parameter
#
# By default, pandas will automatically assign a numeric auto-incremental index or row label starting with zero. You may want to leave the default index as such if your data doesn't have a column with unique values that can serve as a better index. In case there is a column that you feel would serve as a better index, you can override the default behavior by setting the `index_col` parameter. It takes a numeric value or a string for setting a single column as index, or a list of numeric values or strings for creating a multi-index.
#
# In our data, we are choosing the first column, `Timestamp`, as index (index=0) by passing zero to the `index_col` argument.
df = pd.read_csv('btc-market-price.csv',
header=None,
na_values=['', '?', '-'],
names=['Timestamp', 'Price'],
dtype={'Price': 'float'},
parse_dates=[0],
index_col=[0])
df.head()
df.dtypes
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png)
#
# ## A more challenging parsing
#
# Now we'll read another CSV file. This file has the following columns:
#
# - `first_name`
# - `last_name`
# - `age`
# - `math_score`
# - `french_score`
# - `next_test_date`
#
# Let's read it and see what it looks like.
exam_df = pd.read_csv('exam_review.csv')
exam_df
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png)
#
# ## Custom data delimiters using `sep` parameter
#
# We can define which delimiter to use with the `sep` parameter. If we don't specify it, pandas assumes a comma (`,`) separator by default.
#
# Most CSV files use a comma (`,`) as separator, which matches that default. But we can find files with other separators such as semicolons (`;`), tabs (`\t`, especially in TSV files), whitespace or other special characters.
#
# In this case the separator is a `>` character.
exam_df = pd.read_csv('exam_review.csv',
sep='>')
exam_df
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png)
#
# ## Custom data encoding
#
# Files are stored using different "encodings". You've probably heard about ASCII, UTF-8, latin1, etc.
#
# While reading data, a custom encoding can be defined with the `encoding` parameter.
#
# - `encoding='UTF-8'`: will be used if data is UTF-8 encoded.
# - `encoding='iso-8859-1'`: will be used if data is ISO/IEC 8859-1 ("extended ASCII") encoded.
#
# In our case we don't need a custom encoding, as the data loads properly; the parameter is shown below only for completeness.
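# As a hedged illustration (UTF-8 is already pandas' default, so the result is identical to the earlier reads), an explicit encoding would be passed like this:
pd.read_csv('exam_review.csv',
            sep='>',
            encoding='utf-8').head()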
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png)
#
# ## Custom numeric `decimal` and `thousands` character
#
# The decimal and thousands characters could change between datasets. If we have a column containing a comma (`,`) to indicate the decimal or thousands place, then this column would be considered a string and not numeric.
exam_df = pd.read_csv('exam_review.csv',
sep='>')
exam_df
exam_df[['math_score', 'french_score']].dtypes
# To solve that and ensure such columns are interpreted as numeric values, we'll need to use the `decimal` and/or `thousands` parameters to indicate the correct decimal and/or thousands separators.
exam_df = pd.read_csv('exam_review.csv',
sep='>',
decimal=',')
exam_df
exam_df[['math_score', 'french_score']].dtypes
# Let's see what happens with the `thousands` parameter:
pd.read_csv('exam_review.csv',
sep='>',
thousands=',')
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png)
#
# ## Excluding specific rows
#
# We can use the `skiprows` to:
#
# - Skip reading a specified number of rows from the beginning of a file, by passing an integer argument. **This removes the header too**.
# - Skip reading specific row indices from a file, by passing a list containing row indices to skip.
exam_df = pd.read_csv('exam_review.csv',
sep='>',
decimal=',')
exam_df
# To skip reading the first 2 rows from this file, we can use `skiprows=2`:
pd.read_csv('exam_review.csv',
sep='>',
skiprows=2)
# As the header is considered the first row, to skip reading data rows 1 and 3 we can use `skiprows=[1,3]`:
exam_df = pd.read_csv('exam_review.csv',
sep='>',
decimal=',',
skiprows=[1,3])
exam_df
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png)
#
# ## Get rid of blank lines
#
# The `skip_blank_lines` parameter defaults to `True`, so blank lines are skipped while we read files.
#
# If we set this parameter to `False`, then every blank line will be loaded with `NaN` values into the `DataFrame`.
pd.read_csv('exam_review.csv',
sep='>',
skip_blank_lines=False)
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png)
#
# ## Loading specific columns
#
# We can use the `usecols` parameter when we want to load just specific columns and not all of them.
#
# Performance-wise it is better because, instead of loading an entire dataframe into memory and then dropping the columns we don't need, we can select only the required columns while loading the dataset.
#
# As a parameter to `usecols`, you can pass either a list of strings corresponding to the column names or a list of integers corresponding to the column indices.
pd.read_csv('exam_review.csv',
usecols=['first_name', 'last_name', 'age'],
sep='>')
# Or using just the column position:
pd.read_csv('exam_review.csv',
usecols=[0, 1, 2],
sep='>')
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png)
#
# ## Using a `Series` instead of `DataFrame`
#
# If the parsed data only contains one column then we can return a Series by setting the `squeeze` parameter to `True`.
exam_test_1 = pd.read_csv('exam_review.csv',
sep='>',
usecols=['last_name'])
type(exam_test_1)
exam_test_2 = pd.read_csv('exam_review.csv',
sep='>',
usecols=['last_name'],
squeeze=True)
type(exam_test_2)
# ![green-divider](https://user-images.githubusercontent.com/7065401/52071924-c003ad80-2562-11e9-8297-1c6595f8a7ff.png)
#
# ## Save to CSV file
#
# Finally we can also save our `DataFrame` as a CSV file.
exam_df
# We can simply generate a CSV string from our `DataFrame`:
exam_df.to_csv()
# Or specify a file path where we want our generated CSV code to be saved:
exam_df.to_csv('out.csv')
pd.read_csv('out.csv')
exam_df.to_csv('out.csv',
index=None)
pd.read_csv('out.csv')
# ![purple-divider](https://user-images.githubusercontent.com/7065401/52071927-c1cd7100-2562-11e9-908a-dde91ba14e59.png)
| Reading Data/lesson-1-reading-csv-and-txt-files/files/Lecture.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from __future__ import absolute_import, division, print_function
# # Some Notebook Possibilities
#
# http://mpld3.github.io/examples/linked_brush.html
# +
# uncomment the bottom line in this cell, change the final line of
# the loaded script to `mpld3.display()` (instead of show).
# +
# # %load http://mpld3.github.io/_downloads/linked_brush.py
# -
# +
# %%writefile ../example-script.py
# #!/bin/env python
from __future__ import absolute_import, division, print_function
print('Arbitrary code here.')
# -
# !cat ../example-script.py
| notebook-tutorial/notebooks/02-Visualization-and-code-organization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.013446, "end_time": "2021-02-16T00:16:17.746947", "exception": false, "start_time": "2021-02-16T00:16:17.733501", "status": "completed"} tags=[]
# ## Dependencies
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _kg_hide-input=true _kg_hide-output=true _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 6.625959, "end_time": "2021-02-16T00:16:24.385393", "exception": false, "start_time": "2021-02-16T00:16:17.759434", "status": "completed"} tags=[]
import warnings, glob
from tensorflow.keras import Sequential, Model
from cassava_scripts import *
seed = 0
seed_everything(seed)
warnings.filterwarnings('ignore')
# + [markdown] papermill={"duration": 0.011849, "end_time": "2021-02-16T00:16:24.409410", "exception": false, "start_time": "2021-02-16T00:16:24.397561", "status": "completed"} tags=[]
# ### Hardware configuration
# + _kg_hide-input=true papermill={"duration": 0.025526, "end_time": "2021-02-16T00:16:24.446765", "exception": false, "start_time": "2021-02-16T00:16:24.421239", "status": "completed"} tags=[]
# TPU or GPU detection
# Detect hardware, return appropriate distribution strategy
strategy, tpu = set_up_strategy()
AUTO = tf.data.experimental.AUTOTUNE
REPLICAS = strategy.num_replicas_in_sync
print(f'REPLICAS: {REPLICAS}')
# + [markdown] papermill={"duration": 0.012048, "end_time": "2021-02-16T00:16:24.471251", "exception": false, "start_time": "2021-02-16T00:16:24.459203", "status": "completed"} tags=[]
# # Model parameters
# + papermill={"duration": 0.018974, "end_time": "2021-02-16T00:16:24.502376", "exception": false, "start_time": "2021-02-16T00:16:24.483402", "status": "completed"} tags=[]
BATCH_SIZE = 8 * REPLICAS
HEIGHT = 512
WIDTH = 512
CHANNELS = 3
N_CLASSES = 5
TTA_STEPS = 0 # Do TTA if > 0
# + [markdown] papermill={"duration": 0.012052, "end_time": "2021-02-16T00:16:24.526563", "exception": false, "start_time": "2021-02-16T00:16:24.514511", "status": "completed"} tags=[]
# # Augmentation
# + _kg_hide-input=false papermill={"duration": 0.019993, "end_time": "2021-02-16T00:16:24.558780", "exception": false, "start_time": "2021-02-16T00:16:24.538787", "status": "completed"} tags=[]
def data_augment(image, label):
p_spatial = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
# Flips
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
if p_spatial > .75:
image = tf.image.transpose(image)
return image, label
# + [markdown] papermill={"duration": 0.012112, "end_time": "2021-02-16T00:16:24.583226", "exception": false, "start_time": "2021-02-16T00:16:24.571114", "status": "completed"} tags=[]
# ## Auxiliary functions
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _kg_hide-input=true _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" papermill={"duration": 0.024314, "end_time": "2021-02-16T00:16:24.619877", "exception": false, "start_time": "2021-02-16T00:16:24.595563", "status": "completed"} tags=[]
# Datasets utility functions
def resize_image(image, label):
image = tf.image.resize(image, [HEIGHT, WIDTH])
image = tf.reshape(image, [HEIGHT, WIDTH, CHANNELS])
return image, label
def process_path(file_path):
name = get_name(file_path)
img = tf.io.read_file(file_path)
img = decode_image(img)
# img, _ = scale_image(img, None)
# img = center_crop(img, HEIGHT, WIDTH)
return img, name
def get_dataset(files_path, shuffled=False, tta=False, extension='jpg'):
dataset = tf.data.Dataset.list_files(f'{files_path}*{extension}', shuffle=shuffled)
dataset = dataset.map(process_path, num_parallel_calls=AUTO)
if tta:
dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
dataset = dataset.map(resize_image, num_parallel_calls=AUTO)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
# + [markdown] papermill={"duration": 0.012053, "end_time": "2021-02-16T00:16:24.644292", "exception": false, "start_time": "2021-02-16T00:16:24.632239", "status": "completed"} tags=[]
# # Load data
# + _kg_hide-input=true papermill={"duration": 0.049828, "end_time": "2021-02-16T00:16:24.706520", "exception": false, "start_time": "2021-02-16T00:16:24.656692", "status": "completed"} tags=[]
database_base_path = '/kaggle/input/cassava-leaf-disease-classification/'
submission = pd.read_csv(f'{database_base_path}sample_submission.csv')
display(submission.head())
TEST_FILENAMES = tf.io.gfile.glob(f'{database_base_path}test_tfrecords/ld_test*.tfrec')
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
print(f'GCS: test: {NUM_TEST_IMAGES}')
# + papermill={"duration": 0.66462, "end_time": "2021-02-16T00:16:25.384376", "exception": false, "start_time": "2021-02-16T00:16:24.719756", "status": "completed"} tags=[]
# !ls /kaggle/input/
# + _kg_hide-input=true papermill={"duration": 0.028255, "end_time": "2021-02-16T00:16:25.426473", "exception": false, "start_time": "2021-02-16T00:16:25.398218", "status": "completed"} tags=[]
model_path_list = glob.glob('/kaggle/input/157-cassava-leaf-effnetb4-drop-connect-rate-04-512/*.h5')
model_path_list.sort()
print('Models to predict:')
print(*model_path_list, sep='\n')
# + [markdown] papermill={"duration": 0.013947, "end_time": "2021-02-16T00:16:25.455471", "exception": false, "start_time": "2021-02-16T00:16:25.441524", "status": "completed"} tags=[]
# # Model
# + _kg_hide-output=true papermill={"duration": 6.106056, "end_time": "2021-02-16T00:16:31.575750", "exception": false, "start_time": "2021-02-16T00:16:25.469694", "status": "completed"} tags=[]
def model_fn(input_shape, N_CLASSES):
inputs = L.Input(shape=input_shape, name='input_image')
base_model = tf.keras.applications.EfficientNetB4(input_tensor=inputs,
include_top=False,
drop_connect_rate=.4,
weights=None)
x = L.GlobalAveragePooling2D()(base_model.output)
x = L.Dropout(.5)(x)
output = L.Dense(N_CLASSES, activation='softmax', name='output')(x)
model = Model(inputs=inputs, outputs=output)
return model
with strategy.scope():
model = model_fn((None, None, CHANNELS), N_CLASSES)
model.summary()
# + [markdown] papermill={"duration": 0.024183, "end_time": "2021-02-16T00:16:31.625145", "exception": false, "start_time": "2021-02-16T00:16:31.600962", "status": "completed"} tags=[]
# # Test set predictions
# + _kg_hide-input=false papermill={"duration": 8.480603, "end_time": "2021-02-16T00:16:40.123435", "exception": false, "start_time": "2021-02-16T00:16:31.642832", "status": "completed"} tags=[]
files_path = f'{database_base_path}test_images/'
test_size = len(os.listdir(files_path))
test_preds = np.zeros((test_size, N_CLASSES))
for model_path in model_path_list:
print(model_path)
K.clear_session()
model.load_weights(model_path)
if TTA_STEPS > 0:
test_ds = get_dataset(files_path, tta=True).repeat()
ct_steps = TTA_STEPS * ((test_size/BATCH_SIZE) + 1)
preds = model.predict(test_ds, steps=ct_steps, verbose=1)[:(test_size * TTA_STEPS)]
preds = np.mean(preds.reshape(test_size, TTA_STEPS, N_CLASSES, order='F'), axis=1)
test_preds += preds / len(model_path_list)
else:
test_ds = get_dataset(files_path, tta=False)
x_test = test_ds.map(lambda image, image_name: image)
test_preds += model.predict(x_test) / len(model_path_list)
test_preds = np.argmax(test_preds, axis=-1)
test_names_ds = get_dataset(files_path)
image_names = [img_name.numpy().decode('utf-8') for img, img_name in iter(test_names_ds.unbatch())]
# + _kg_hide-input=true papermill={"duration": 0.146981, "end_time": "2021-02-16T00:16:40.287793", "exception": false, "start_time": "2021-02-16T00:16:40.140812", "status": "completed"} tags=[]
submission = pd.DataFrame({'image_id': image_names, 'label': test_preds})
submission.to_csv('submission.csv', index=False)
display(submission.head())
| Model backlog/Models/Inference/157-cassava-leaf-inf-effnetb4-drop-connect-rate-04.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import spacy
import torchtext
from torchtext import data
from pathlib import Path
import pandas as pd
import spacy
torch.__version__
torchtext.__version__
# ## Text sentiment analysis
#
# Dataset: [sentiment140](http://help.sentiment140.com/for-students). We use training.1600000.processed.noemoticon.csv
tweetsDF = pd.read_csv("input/training.1600000.processed.noemoticon.csv", engine="python", header=None)
tweetsDF.head(5)
tweetsDF[0].value_counts()
tweetsDF["sentiment_cat"] = tweetsDF[0].astype('category')
tweetsDF["sentiment"] = tweetsDF["sentiment_cat"].cat.codes
tweetsDF.to_csv("output/train-processed.csv", header=None, index=None)
tweetsDF.sample(10000).to_csv("output/train-processed-sample.csv", header=None, index=None)
tweetsDF.head(5)
# torchtext builds a dataset from json/csv files given field definitions with specific parameter types. [More details here](https://torchtext.readthedocs.io/en/latest/data.html#field)
#
# We are only interested in the labels and the tweet text; the remaining fields are dropped. The label is defined as a non-text field. The text is tokenized with spaCy and converted to lower case.
#
# **To work with English text in spaCy** you need to download the language model:
#
# ```
# python -m spacy download en
# ```
# +
LABEL = data.LabelField()
TWEET = data.Field(tokenize='spacy', lower=True)
fields = [('score',None), ('id',None),('date',None),('query',None),
('name',None),
('tweet', TWEET),('category',None),('label',LABEL)]
# -
# Using `TabularDataset`, we apply the field definitions to our data
twitterDataset = torchtext.data.TabularDataset(
path="output/train-processed-sample.csv",
format="CSV",
fields=fields,
skip_header=False
)
# Split into train, validation and test sets
# +
train, test, valid = twitterDataset.split(
split_ratio=[0.6,0.2,0.2],
stratified=True,
strata_field='label'
)
print(len(train), len(test), len(valid))
# -
vars(train.examples[7])
# #### Building the vocabulary
#
# Using the `build_vocab` method with the `max_size` parameter, we create a vocabulary limited to the most common words (for example, 20 thousand). Torchtext adds two more tokens: `<unk>` for unknown words and `<pad>`, a padding token used to pad all texts to the same length so that PyTorch can batch them. Other additional tokens can be specified, but they are not included by default; a small sketch of this follows after the vocabulary is built below.
vocab_size = 20000
TWEET.build_vocab(train, max_size = vocab_size)
LABEL.build_vocab(train)
len(TWEET.vocab)
TWEET.vocab.freqs.most_common(10)
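# As an illustrative sketch only (these start/end-of-sentence markers are not used elsewhere in this notebook), extra special tokens are declared on the `Field` before the vocabulary is built:
TWEET_WITH_MARKERS = data.Field(tokenize='spacy', lower=True,
                                init_token='<sos>', eos_token='<eos>')
# A vocabulary built on this field would contain four special tokens: <unk>, <pad>, <sos> and <eos>.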
# Finally, we create the data loaders with `BucketIterator`
device = "cuda"
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train, valid, test),
batch_size = 32,
device = device,
sort_key = lambda x: len(x.tweet),
sort_within_batch = False)
# ## Building the model
#
# The first layer creates the embeddings: 300-dimensional vectors. It is followed by an LSTM with a hidden size of 100. The output is a fully connected layer with two classes (negative or positive tweet).
# +
class OurFirstLSTM(nn.Module):
def __init__(self, hidden_size, embedding_dim, vocab_size):
super(OurFirstLSTM, self).__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.encoder = nn.LSTM(
input_size=embedding_dim,
hidden_size=hidden_size,
num_layers=1
)
self.predictor = nn.Linear(hidden_size, 2)
def forward(self, seq):
output, (hidden,_) = self.encoder(self.embedding(seq))
preds = self.predictor(hidden.squeeze(0))
return preds
model = OurFirstLSTM(100,300, 20002)
model.to(device)
# -
# ## Training
#
# Here we need to reference `batch.tweet` and `batch.label` to access the specific fields we are interested in.
# +
optimizer = optim.Adam(model.parameters(), lr=2e-2)
criterion = nn.CrossEntropyLoss()
def train(epochs, model, optimizer, criterion, train_iterator, valid_iterator):
for epoch in range(1, epochs + 1):
training_loss = 0.0
valid_loss = 0.0
model.train()
for batch_idx, batch in enumerate(train_iterator):
optimizer.zero_grad()
predict = model(batch.tweet)
loss = criterion(predict,batch.label)
loss.backward()
optimizer.step()
training_loss += loss.data.item() * batch.tweet.size(0)
training_loss /= len(train_iterator)
model.eval()
for batch_idx,batch in enumerate(valid_iterator):
predict = model(batch.tweet)
loss = criterion(predict,batch.label)
valid_loss += loss.data.item() * batch.tweet.size(0)
valid_loss /= len(valid_iterator)
print('Epoch: {}, Training Loss: {:.2f}, Validation Loss: {:.2f}'.format(epoch, training_loss, valid_loss))
# -
train(10, model, optimizer, criterion, train_iterator, valid_iterator)
# ## Predictions
#
# A drawback of torchtext is that it is awkward to use for inference. In our case we emulate the processing pipeline and make a prediction at the end.
#
# In this function we call `preprocess()`, which performs spaCy-based tokenization. Then `process()` turns the tokens into a tensor based on the vocabulary we built (torchtext expects a batch of strings, so the data has to be passed to the function as a list of lists). We then feed all of this into the model and take the argmax of the output tensor as the predicted class.
def classify_tweet(tweet):
categories = {0: "Negative", 1:"Positive"}
processed = TWEET.process([TWEET.preprocess(tweet)])
processed = processed.to(device)
return categories[model(processed).argmax().item()]
tweetsDF[5][55]
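# For example, the same raw tweet can be passed through the classifier (the predicted label depends on the trained weights):
classify_tweet(tweetsDF[5][55])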
# ## Data augmentation
# #### Random insertion
#
# Randomly insert synonyms of existing non-stop words into the sentence, n times
import random
# Note: you'll have to define remove_stopwords() and get_synonyms() elsewhere
def random_insertion(sentence, n):
words = remove_stopwords(sentence)
for _ in range(n):
new_synonym = get_synonyms(random.choice(words))
        sentence.insert(random.randrange(len(sentence) + 1), new_synonym)
return sentence
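# The two helper functions assumed above are not defined in this notebook. A minimal sketch using NLTK's stop-word list and WordNet might look like this (it assumes `nltk` is installed and the 'stopwords' and 'wordnet' corpora have been downloaded):
# +
from nltk.corpus import stopwords, wordnet
def remove_stopwords(sentence):
    # keep only the words that are not English stop words
    stops = set(stopwords.words('english'))
    return [w for w in sentence if w.lower() not in stops]
def get_synonyms(word):
    # return a single random WordNet synonym, matching how the result is used above
    synonyms = {lemma.name() for syn in wordnet.synsets(word)
                for lemma in syn.lemmas() if lemma.name() != word}
    return random.choice(list(synonyms)) if synonyms else word
# -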
# #### Random deletion
def random_deletion(words, p=0.5):
if len(words) == 1:
return words
remaining = list(filter(lambda x: random.uniform(0,1) > p, words))
if len(remaining) == 0:
return [random.choice(words)]
else:
return remaining
# #### Random swap
def random_swap(sentence, n=5):
length = range(len(sentence))
for _ in range(n):
idx1, idx2 = random.sample(length, 2)
sentence[idx1], sentence[idx2] = sentence[idx2], sentence[idx1]
return sentence
# These methods improve model training by roughly 3%, but only when there are few labeled examples (500 or fewer). With many examples (more than 5000), the gain can drop to 0.8% or less
# #### Back-translation
#
# Translate the text into one or more other languages, then back again.
#
# We use googletrans (limitations: up to 15 thousand words at a time; for a large dataset, send the data in batches to avoid an IP ban from Google)
# +
import googletrans
translator = googletrans.Translator()
sentences = ['The cat sat on the mat']
translations_fr = translator.translate(sentences, dest='fr')
fr_text = [t.text for t in translations_fr]
translations_en = translator.translate(fr_text, dest='en')
en_text = [t.text for t in translations_en]
print(en_text)
# +
# choosing the target language at random
available_langs = list(googletrans.LANGUAGES.keys())
tr_lang = random.choice(available_langs)
print(f"Translating to {googletrans.LANGUAGES[tr_lang]}")
translations = translator.translate(sentences, dest=tr_lang)
t_text = [t.text for t in translations]
print(t_text)
translations_en_random = translator.translate(t_text, src=tr_lang, dest='en')
en_text = [t.text for t in translations_en_random]
print(en_text)
# -
| pytorch-learning-texts.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import math, os, sys
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
dataset = np.load('dataset/all_data-preprocessed.npz')
features, labels = dataset['features'].astype('float32'), dataset['labels'].astype('float32')
train_test_split_factor = .8
validation_split_factor = .2
train_x, train_y, test_x, test_y = features[:math.floor(len(features)*train_test_split_factor)], labels[:math.floor(len(labels)*train_test_split_factor)], features[math.floor(len(features)*train_test_split_factor):], labels[math.floor(len(labels)*train_test_split_factor):]
train_x, test_x = np.expand_dims(train_x, axis=-1), np.expand_dims(test_x, axis=-1) # add a trailing axis (flattened again below for the random forest)
input_shape = train_x.shape
print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
train_x = train_x.reshape(train_x.shape[0], 7).astype('float32')
test_x = test_x.reshape(test_x.shape[0], 7).astype('float32')
print(train_x.shape, test_x.shape)
model = RandomForestRegressor(n_estimators=200 ,max_depth=10,random_state=0)
model.fit(train_x, train_y)
pred = model.predict(test_x[:64])
close_pred = np.reshape(pred, (-1, 1))
test_y_reshape = np.reshape(test_y[:64], (-1, 1))
days = np.arange(1, len(test_y_reshape)+1)
plt.plot(days, test_y_reshape, 'b', label='Actual line')
plt.plot(days, close_pred, 'r', label='Predicted line')
plt.title('RFRegressor')
plt.xlabel('Days')
plt.ylabel('Close Prices')
plt.legend()
plt.show()
from sklearn.metrics import mean_squared_error as MSE
def evaluate(model, test_features, test_labels):
    predictions = model.predict(test_features)
    errors = abs(predictions - test_labels)
    mape = 100 * np.mean(errors / test_labels)
    accuracy = 100 - mape
    print('Model Performance')
    print('Average Error: {:0.4f}.'.format(np.mean(errors)))
    # RMSE computed on the data passed in, not on the globals defined above
    print('RMSE: {:0.4f}'.format(math.sqrt(MSE(test_labels, predictions))))
    print('Accuracy = {:0.2f}%.'.format(accuracy))
    return accuracy
accuracy = evaluate(model, test_x, test_y)
# save model
import joblib
joblib.dump(model, 'weights/rf.sav')
| rf_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Validation Playground
#
# **Watch** a [short tutorial video](https://greatexpectations.io/videos/getting_started/integrate_expectations) or **read** [the written tutorial](https://docs.greatexpectations.io/en/latest/tutorials/validate_data.html?utm_source=notebook&utm_medium=validate_data)
#
# #### This notebook assumes that you created at least one expectation suite in your project.
# #### Here you will learn how to validate data loaded into a Pandas DataFrame against an expectation suite.
#
#
# We'd love it if you **reach out for help on** the [**Great Expectations Slack Channel**](https://greatexpectations.io/slack)
import json
import great_expectations as ge
import great_expectations.jupyter_ux
from great_expectations.datasource.types import BatchKwargs
from datetime import datetime
# ## 1. Get a DataContext
# This represents your **project** that you just created using `great_expectations init`.
context = ge.data_context.DataContext()
# ## 2. Choose an Expectation Suite
#
# List expectation suites that you created in your project
context.list_expectation_suite_names()
expectation_suite_name = # TODO: set to a name from the list above
# ## 3. Load a batch of data you want to validate
#
# To learn more about `get_batch`, see [this tutorial](https://docs.greatexpectations.io/en/latest/tutorials/validate_data.html?utm_source=notebook&utm_medium=validate_data#load-a-batch-of-data-to-validate)
#
# list datasources of the type PandasDatasource in your project
[datasource['name'] for datasource in context.list_datasources() if datasource['class_name'] == 'PandasDatasource']
datasource_name = # TODO: set to a datasource name from above
# +
# If you would like to validate a file on a filesystem:
batch_kwargs = {'path': "YOUR_FILE_PATH", 'datasource': datasource_name}
# If you already loaded the data into a Pandas Data Frame:
batch_kwargs = {'dataset': "YOUR_DATAFRAME", 'datasource': datasource_name}
batch = context.get_batch(batch_kwargs, expectation_suite_name)
batch.head()
# -
# ## 4. Validate the batch with Validation Operators
#
# `Validation Operators` provide a convenient way to bundle the validation of
# multiple expectation suites and the actions that should be taken after validation.
#
# When deploying Great Expectations in a **real data pipeline, you will typically discover these needs**:
#
# * validating a group of batches that are logically related
# * validating a batch against several expectation suites such as using a tiered pattern like `warning` and `failure`
# * doing something with the validation results (e.g., saving them for a later review, sending notifications in case of failures, etc.).
#
# [Read more about Validation Operators in the tutorial](https://docs.greatexpectations.io/en/latest/tutorials/validate_data.html?utm_source=notebook&utm_medium=validate_data#save-validation-results)
# +
# This is an example of invoking a validation operator that is configured by default in the great_expectations.yml file
#Generate a run id, a timestamp, or a meaningful string that will help you refer to validation results. We recommend they be chronologically sortable.
# Let's make a simple sortable timestamp. Note this could come from your pipeline runner (e.g., Airflow run id).
run_id = datetime.utcnow().isoformat().replace(":", "") + "Z"
results = context.run_validation_operator(
"action_list_operator",
assets_to_validate=[batch],
run_id=run_id)
# -
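# Below is a sketch of the tiered pattern mentioned above: the same batch validated against a "warning" and a "failure" suite. The suite names are placeholders and must match expectation suites that exist in your project.
# +
warning_batch = context.get_batch(batch_kwargs, "YOUR_SUITE_NAME.warning")
failure_batch = context.get_batch(batch_kwargs, "YOUR_SUITE_NAME.failure")
tiered_results = context.run_validation_operator(
    "action_list_operator",
    assets_to_validate=[warning_batch, failure_batch],
    run_id=run_id)
# -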
# ## 5. View the Validation Results in Data Docs
#
# Let's now build and look at your Data Docs. These will now include an **data quality report** built from the `ValidationResults` you just created that helps you communicate about your data with both machines and humans.
#
# [Read more about Data Docs in the tutorial](https://docs.greatexpectations.io/en/latest/tutorials/validate_data.html?utm_source=notebook&utm_medium=validate_data#view-the-validation-results-in-data-docs)
context.open_data_docs()
# ## Congratulations! You ran Validations!
#
# ## Next steps:
#
# ### 1. Read about the typical workflow with Great Expectations:
#
# [typical workflow](https://docs.greatexpectations.io/en/latest/getting_started/typical_workflow.html?utm_source=notebook&utm_medium=validate_data#view-the-validation-results-in-data-docs)
#
# ### 2. Explore the documentation & community
#
# You are now among the elite data professionals who know how to build robust descriptions of your data and protections for pipelines and machine learning models. Join the [**Great Expectations Slack Channel**](https://greatexpectations.io/slack) to see how others are wielding these superpowers.
| great_expectations/init_notebooks/pandas/validation_playground.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py2] *
# language: python
# name: conda-env-py2-py
# ---
# +
import matplotlib.pyplot as plt
import do_plot_get2 as dpg
import numpy as np
import os,sys,datetime
import traceback
reload(dpg)
folds = 1
Ns = 256
S = 10
maxKrec=S
Ksel=maxKrec
droot1='./out/'
start0 = 10
hRUNFOL = './mrw2dd/bump_lbfgs_gpu_N256J5L8dj1dl4dk0dn2_maxkshift1_factr10maxite500maxcor20_initnormalstdbarx'
hptfile = 'modelC'
import scipy.io as sio
for fol in range(folds):
kstart=fol+1
imgs_pt=dpg.get_kymatio_pt(droot1,hRUNFOL,hptfile,Ns,kstart,maxKrec,Ksel,start0=start0,nbstart=10)
ofile = './synthesis/mrw2dd_modelC_synthesis_ks' + str(kstart-1) +'.mat'
sio.savemat (ofile , {'imgs':imgs_pt})
# -
| plot2/mrw2dd_modelC_export.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
from example_robot_data import load
from meshcat_jupyter import PinocchioJupyterVisualizer
robot = load('solo8')
viz = PinocchioJupyterVisualizer(robot.model, robot.collision_model, robot.visual_model)
# -
viz.loadViewerModel()
viz.display(robot.q0)
| ur5 (DAE).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from sympy.physics.units import *
from sympy import *
# Rounding:
import decimal
from decimal import Decimal as DX
def iso_round(obj, pv, rounding=decimal.ROUND_HALF_EVEN):
import sympy
"""
Rounding acc. to DIN EN ISO 80000-1:2013-08
place value = Rundestellenwert
"""
assert pv in set([
# place value # round to:
1, # 1
0.1, # 1st digit after decimal
0.01, # 2nd
0.001, # 3rd
0.0001, # 4th
0.00001, # 5th
0.000001, # 6th
0.0000001, # 7th
0.00000001, # 8th
0.000000001, # 9th
0.0000000001, # 10th
])
try:
tmp = DX(str(float(obj)))
obj = tmp.quantize(DX(str(pv)), rounding=rounding)
except:
for i in range(len(obj)):
tmp = DX(str(float(obj[i])))
obj[i] = tmp.quantize(DX(str(pv)), rounding=rounding)
return obj
# LateX:
kwargs = {}
kwargs["mat_str"] = "bmatrix"
kwargs["mat_delim"] = ""
# kwargs["symbol_names"] = {FB: "F^{\mathsf B}", }
# Units:
(k, M, G ) = ( 10**3, 10**6, 10**9 )
(mm, cm, deg) = ( m/1000, m/100, pi/180)
Newton = kg*m/s**2
Pa = Newton/m**2
MPa = M*Pa
GPa = G*Pa
kN = k*Newton
half = S(1)/2
# ---
def k(phi):
""" element stiffness matrix """
# phi: Angle between two rays 1 and 2:
# 1: ray along global x axis and
# 2: ray along 1-2-axis of rod
# phi is counted positively about z.
(c, s) = ( cos(phi), sin(phi) )
(cc, ss, sc) = ( c*c, s*s, s*c)
return Matrix(
[
[ cc, sc, -cc, -sc],
[ sc, ss, -sc, -ss],
[-cc, -sc, cc, sc],
[-sc, -ss, sc, ss],
])
F, c = var("F, c")
# Stiffness Matrix:
k1 = c*k(135 *pi/180)
pprint("\n\nk1 / c: ")
pprint(k1/c)
# Linear System:
u, F1x, F2x, F2y = var("u, F1x, F2x, F2y")
f_ = Matrix([F1x, -F/2, F2x, F2y])
u_ = Matrix([0, -u, 0, 0])
sol = solve(Eq(k1*u_,f_), [u, F1x, F2x, F2y], dict=True)
pprint("\n\nSolution:")
pprint(sol[0])
# k1 / c:
# ⎡1/2 -1/2 -1/2 1/2 ⎤
# ⎢ ⎥
# ⎢-1/2 1/2 1/2 -1/2⎥
# ⎢ ⎥
# ⎢-1/2 1/2 1/2 -1/2⎥
# ⎢ ⎥
# ⎣1/2 -1/2 -1/2 1/2 ⎦
#
#
# Solution:
# ⎧ F -F F F⎫
# ⎨F1x: ─, F2x: ───, F2y: ─, u: ─⎬
# ⎩ 2 2 2 c⎭
| ipynb/WB-Klein/5/5.3_cc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/debanga/DeepLearningNotebooks/blob/master/Recurrent_Neural_Networks_Introduction_to_LSTM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="VGm0xBsNQBaP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="065615ec-2e71-40e6-db38-20c249c6ceaf"
# Recurrent Neural Networks - LSTM Introduction
# Based on "https://www.kaggle.com/thebrownviking20/intro-to-recurrent-neural-networks-lstm-gru"
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, GRU, Bidirectional
from keras.optimizers import SGD
from sklearn.metrics import mean_squared_error
# + id="DPr_tfbUSblo" colab_type="code" colab={}
# Some functions to help out with
def plot_predictions(test,predicted):
plt.plot(test, color='red',label='Real IBM Stock Price')
plt.plot(predicted, color='blue',label='Predicted IBM Stock Price')
plt.title('IBM Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('IBM Stock Price')
plt.legend()
plt.show()
def return_rmse(test,predicted):
rmse = math.sqrt(mean_squared_error(test, predicted))
print("The root mean squared error is {}.".format(rmse))
# + id="JLEZvhRNShWG" colab_type="code" outputId="68bfc371-b836-4ce2-acd6-412438297323" colab={"base_uri": "https://localhost:8080/", "height": 225}
# Get data
dataset = pd.read_csv('https://raw.githubusercontent.com/debanga/DeepLearningNotebooks/master/data/IBM_2006-01-01_to_2018-01-01.csv', index_col='Date', parse_dates=['Date'])
dataset.head()
# + id="1yRqxQu4V0fE" colab_type="code" colab={}
# Splitting into training (up to 2016) and test (from 2017 onward) sets using the 'High' column
training_set = dataset[:'2016'].iloc[:,1:2].values
test_set = dataset['2017':].iloc[:,1:2].values
# + id="0_7AdHKCXBiX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 285} outputId="2440e278-2887-4e82-c87c-6b704cb7a40c"
# We have chosen 'High' attribute for prices. Let's see what it looks like
dataset["High"][:'2016'].plot(figsize=(16,4),legend=True)
dataset["High"]['2017':].plot(figsize=(16,4),legend=True)
plt.legend(['Training set (Before 2017)','Test set (2017 and beyond)'])
plt.title('IBM stock price')
plt.show()
# + id="hADuoy39Y1yj" colab_type="code" colab={}
# Scaling the training set
sc = MinMaxScaler(feature_range=(0,1))
training_set_scaled = sc.fit_transform(training_set)
# + id="TYT3i0mcY-yl" colab_type="code" colab={}
# Since LSTMs store long term memory state, we create a data structure with 60 timesteps and 1 output
# So for each element of training set, we have 60 previous training set elements
X_train = []
y_train = []
for i in range(60,2769):
X_train.append(training_set_scaled[i-60:i,0])
y_train.append(training_set_scaled[i,0])
X_train, y_train = np.array(X_train), np.array(y_train)
# + id="Qav3dFvgZgs4" colab_type="code" colab={}
# Reshaping X_train for efficient modelling
X_train = np.reshape(X_train, (X_train.shape[0],X_train.shape[1],1))
# + id="ka2-BKRxayTM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 230} outputId="2e13c95c-b5b1-498f-ccc6-656729f39003"
'''The LSTM architecture '''
regressor = Sequential()
# First LSTM layer with Dropout regularisation
regressor.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1],1)))
regressor.add(Dropout(0.2))
# Second LSTM layer
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.2))
# Third LSTM layer
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.2))
# Fourth LSTM layer
regressor.add(LSTM(units=50))
regressor.add(Dropout(0.2))
# The output layer
regressor.add(Dense(units=1))
# + id="SIN2rRjYbMMS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 474} outputId="55a8e395-0fec-4301-b015-d0f039e214ba"
regressor.summary()
# + id="o0iHZ841b_Dm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="dfe31ea2-dab6-4aef-e753-8d56edbb8d89"
# Compiling the RNN
regressor.compile(optimizer='rmsprop',loss='mean_squared_error')
# Fitting to the training set
regressor.fit(X_train,y_train,epochs=50,batch_size=32)
# + id="nKk53GCDfQqi" colab_type="code" colab={}
# Now we get the test set ready in a similar way as the training set.
# The first 60 entries of the test set need 60 previous values, which we can only obtain by taking the whole
# 'High' attribute data for processing
dataset_total = pd.concat((dataset["High"][:'2016'],dataset["High"]['2017':]),axis=0)
inputs = dataset_total[len(dataset_total)-len(test_set) - 60:].values
inputs = inputs.reshape(-1,1)
inputs = sc.transform(inputs)
# + id="4bi8qXBXfahD" colab_type="code" colab={}
# Preparing X_test and predicting the prices
X_test = []
for i in range(60,311):
X_test.append(inputs[i-60:i,0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0],X_test.shape[1],1))
predicted_stock_price = regressor.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
# + id="wLY_6kAQhDsR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="21e67895-f7fa-47c7-b49a-784d91c999b2"
# Visualizing the results for LSTM
plot_predictions(test_set,predicted_stock_price)
# + id="9Hlo0fmahWQv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="238f49b1-3ec8-4838-cb8a-af701239b36b"
# Evaluating our model
return_rmse(test_set,predicted_stock_price)
# + [markdown] id="AG2djbLyiIsu" colab_type="text"
# LSTM is not the only kind of unit that has taken the world of Deep Learning by storm. We also have Gated Recurrent Units (GRUs). It is not clear which is better, GRU or LSTM, because they have comparable performance; GRUs are generally easier to train than LSTMs.
#
# Gated Recurrent Units
# In simple words, the GRU unit does not need a separate memory cell to control the flow of information like the LSTM unit does; it directly makes use of the hidden state. GRUs have fewer parameters and thus may train a bit faster or need less data to generalize. With large datasets, however, the more expressive LSTM may give better results.
#
# GRUs are similar to LSTMs except that they have two gates: a reset gate and an update gate. The reset gate determines how to combine the new input with the previous memory, and the update gate determines how much of the previous state to keep. The update gate in a GRU plays the role that the input and forget gates play in an LSTM. A GRU has no second non-linearity before computing the output, and no output gate.
#
# Source: Quora
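# + [markdown]
# As a rough, self-contained sketch (not the Keras GRU implementation used below, and with bias terms
# omitted), a single GRU step with the gates described above can be written as follows. The weight
# matrices are hypothetical placeholders, purely for illustration.
# +
def gru_step(x, h_prev, Wz, Uz, Wr, Ur, Wh, Uh):
    """Toy single GRU step: update gate z, reset gate r, candidate state h_tilde."""
    sigmoid = lambda a: 1.0 / (1.0 + np.exp(-a))
    z = sigmoid(x @ Wz + h_prev @ Uz)                # update gate: how much of the old state to keep
    r = sigmoid(x @ Wr + h_prev @ Ur)                # reset gate: how much old state feeds the candidate
    h_tilde = np.tanh(x @ Wh + (r * h_prev) @ Uh)    # candidate hidden state
    return (1 - z) * h_prev + z * h_tilde            # new hidden state
# -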
# + id="zOMYaZtiiLml" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="7f13d6aa-5f57-4727-ce2f-ec21ca78dae9"
# The GRU architecture
regressorGRU = Sequential()
# First GRU layer with Dropout regularisation
regressorGRU.add(GRU(units=50, return_sequences=True, input_shape=(X_train.shape[1],1), activation='tanh'))
regressorGRU.add(Dropout(0.2))
# Second GRU layer
regressorGRU.add(GRU(units=50, return_sequences=True, input_shape=(X_train.shape[1],1), activation='tanh'))
regressorGRU.add(Dropout(0.2))
# Third GRU layer
regressorGRU.add(GRU(units=50, return_sequences=True, input_shape=(X_train.shape[1],1), activation='tanh'))
regressorGRU.add(Dropout(0.2))
# Fourth GRU layer
regressorGRU.add(GRU(units=50, activation='tanh'))
regressorGRU.add(Dropout(0.2))
# The output layer
regressorGRU.add(Dense(units=1))
# Compiling the RNN
regressorGRU.compile(optimizer=SGD(lr=0.01, decay=1e-7, momentum=0.9, nesterov=False),loss='mean_squared_error')
# Fitting to the training set
regressorGRU.fit(X_train,y_train,epochs=50,batch_size=150)
# + id="sl1AOkpSoup3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 474} outputId="7e8c1aa4-dbe7-4592-a274-7f1641a07554"
regressorGRU.summary()
# + id="MGTMhGElkQHB" colab_type="code" colab={}
# Preparing X_test and predicting the prices
X_test = []
for i in range(60,311):
X_test.append(inputs[i-60:i,0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0],X_test.shape[1],1))
GRU_predicted_stock_price = regressorGRU.predict(X_test)
GRU_predicted_stock_price = sc.inverse_transform(GRU_predicted_stock_price)
# + id="EiMxNnTpkVrg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="bffa67d5-2b9a-4e51-fd2c-6dd8fdae3ca5"
# Visualizing the results for GRU
plot_predictions(test_set,GRU_predicted_stock_price)
# + id="cuYnDrpMlfIb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="356e5fa6-de12-4b2e-d15e-b4254a4bfc0b"
# Evaluating GRU
return_rmse(test_set,GRU_predicted_stock_price)
# + [markdown] id="7O0Er1qCmOIV" colab_type="text"
# The models above make use of the test set: each new value is predicted from the last 60 true values (call this a benchmark). This is why the error is so low. Strong models can produce similar results for generated sequences too, but they need more than just the previous values; for stocks we would also need market sentiment, the movement of other stocks, and a lot more. So don't expect a remotely accurate plot below.
#
# We will now generate a sequence using just the initial 60 values, instead of using the last 60 true values for every new prediction.
# + id="eARXqna8mOvy" colab_type="code" colab={}
# Preparing sequence data
initial_sequence = X_train[2708,:]
sequence = []
for i in range(251):
new_prediction = regressorGRU.predict(initial_sequence.reshape(initial_sequence.shape[1],initial_sequence.shape[0],1))
initial_sequence = initial_sequence[1:]
initial_sequence = np.append(initial_sequence,new_prediction,axis=0)
sequence.append(new_prediction)
sequence = sc.inverse_transform(np.array(sequence).reshape(251,1))
# + id="t2Wd_Z_OnEWU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 313} outputId="db7cf34b-b5b9-4d50-9626-c04b08e168b6"
# Visualizing the sequence
plot_predictions(test_set,sequence)
# Evaluating the sequence
return_rmse(test_set,sequence)
| Recurrent_Neural_Networks_Introduction_to_LSTM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from operator import getitem
from functools import partial
full_df = pd.read_json('c.json')
# full_df = full_df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id'], axis = 1)
full_df.head(50)
sa_df = full_df.query('name == "South Africa"')
sa_df.head(50)
def africa():
df = full_df.query('region == "Africa"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
africa().head()
def asia():
df = full_df.query('region == "Asia"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
asia().head()
def oceania():
df = full_df.query('region == "Oceania"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
oceania().head()
def europe():
df = full_df.query('region == "Europe"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
europe().head()
def americas():
df = full_df.query('region == "Americas"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
americas().head()
def polar():
df = full_df.query('region == "Polar"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
polar().head()
# ### Sub-regions
def south_asia():
df = full_df.query('subregion == "Southern Asia"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
south_asia().head()
def northern_europe():
df = full_df.query('subregion == "Northern Europe"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
northern_europe().head()
def northern_africa():
df = full_df.query('subregion == "Northern Africa"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
northern_africa().head()
def south_europe():
df = full_df.query('subregion == "Southern Europe"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
south_europe().head()
def polynesia():
df = full_df.query('subregion == "Polynesia"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
polynesia().head()
def middle_africa():
df = full_df.query('subregion == "Middle Africa"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
middle_africa().head()
def caribbean():
df = full_df.query('subregion == "Caribbean"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
caribbean().head()
def south_america():
df = full_df.query('subregion == "South America"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
south_america().head()
def western_asia():
df = full_df.query('subregion == "Western Asia"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
western_asia().head()
def western_europe():
df = full_df.query('subregion == "Western Europe"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
western_europe().head()
def central_america():
df = full_df.query('subregion == "Central America"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
central_america().head()
def western_africa():
df = full_df.query('subregion == "Western Africa"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
western_africa().head()
def northern_america():
df = full_df.query('subregion == "Northern America"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
northern_america().head()
def southern_africa():
df = full_df.query('subregion == "Southern Africa"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
southern_africa().head()
def eastern_africa():
df = full_df.query('subregion == "Eastern Africa"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
eastern_africa().head()
def south_eastern_asia():
df = full_df.query('subregion == "South-Eastern Asia"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
south_eastern_asia().head()
def eastern_europe():
df = full_df.query('subregion == "Eastern Europe"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
eastern_europe().head()
def eastern_asia():
df = full_df.query('subregion == "Eastern Asia"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
eastern_asia().head()
def australia_newzealand():
df = full_df.query('subregion == "Australia and New Zealand"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
australia_newzealand().head()
def melanesia():
df = full_df.query('subregion == "Melanesia"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
melanesia().head()
def central_asia():
df = full_df.query('subregion == "Central Asia"')
df = df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
central_asia().head()
def countries():
df = full_df.drop(['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3', 'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region',], axis = 1)
return df
countries().head()
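# ### Possible refactor
# The region and sub-region helpers above differ only in the column and value they filter on.
# As a sketch (not used by the cells above), a single parameterised helper could replace them:
DROP_COLS = ['timezones', 'translations', 'states', 'emojiU', 'native', 'tld', 'id', 'iso3',
             'iso2', 'currency_symbol', 'currency', 'phone_code', 'subregion', 'region']
def by_area(column, value):
    """Return the countries whose `column` (e.g. 'region' or 'subregion') equals `value`."""
    return full_df[full_df[column] == value].drop(columns=DROP_COLS)
by_area('subregion', 'Central Asia').head()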
| countries_data_processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''draco'': conda)'
# metadata:
# interpreter:
# hash: 1417893cfb28aac8f4900ac927ab2a1a6ab7e8af638d894f5471e1fba60b1e80
# name: python3
# ---
# # Read Data and Generate the Schema
#
# Here, we will cover how to load data and use inferred statistics in Draco.
# ## Available functions
#
# The main functions allow you to get the schema from a Pandas dataframe or a file. These functions return a schema as a dictionary, which you can encode as Answer Set Programming facts using our generic `dict_to_facts` encoder.
#
#
# ```{eval-rst}
# .. autofunction:: draco.schema.schema_from_dataframe
# .. autofunction:: draco.schema.schema_from_file
# ```
# ## Usage Example
from draco import schema_from_dataframe, dict_to_facts
# In this example, we use a weather dataset from Vega datasets but this could be any Pandas dataframe.
from vega_datasets import data
df = data.seattle_weather()
# We can then call `schema_from_dataframe` to get schema information from the pandas dataframe. The schema information is a dictionary.
schema = schema_from_dataframe(df)
schema
# We can then convert the schema dictionary into facts that Draco's constraint solver can use with `dict_to_facts`. The function returns a list of facts. The solver will be able to parse these facts and consider them in the recommendation process.
dict_to_facts(schema)
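# If the data lives on disk instead of in a dataframe, `schema_from_file` (documented above) can be used in the same way. The snippet below is a sketch: the path is a hypothetical placeholder and should point to an existing CSV or JSON file.
from pathlib import Path
from draco.schema import schema_from_file
file_schema = schema_from_file(Path("data/seattle-weather.csv"))  # hypothetical path
dict_to_facts(file_schema)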
| docs/api/schema.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .ps1
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: .NET (PowerShell)
# language: PowerShell
# name: .net-powershell
# ---
# # T1056.002 - Input Capture: GUI Input Capture
# Adversaries may mimic common operating system GUI components to prompt users for credentials with a seemingly legitimate prompt. When programs are executed that need additional privileges than are present in the current user context, it is common for the operating system to prompt the user for proper credentials to authorize the elevated privileges for the task (ex: [Bypass User Access Control](https://attack.mitre.org/techniques/T1548/002)).
#
# Adversaries may mimic this functionality to prompt users for credentials with a seemingly legitimate prompt for a number of reasons that mimic normal usage, such as a fake installer requiring additional access or a fake malware removal suite.(Citation: OSX Malware Exploits MacKeeper) This type of prompt can be used to collect credentials via various languages such as AppleScript(Citation: LogRhythm Do You Trust Oct 2014)(Citation: OSX Keydnap malware) and PowerShell(Citation: LogRhythm Do You Trust Oct 2014)(Citation: Enigma Phishing for Credentials Jan 2015).
# ## Atomic Tests
#Import the Module before running the tests.
# Checkout Jupyter Notebook at https://github.com/cyb3rbuff/TheAtomicPlaybook to run PS scripts.
Import-Module /Users/0x6c/AtomicRedTeam/atomics/invoke-atomicredteam/Invoke-AtomicRedTeam.psd1 -Force
# ### Atomic Test #1 - AppleScript - Prompt User for Password
# Prompt User for Password (Local Phishing)
# Reference: http://fuzzynop.blogspot.com/2014/10/osascript-for-local-phishing.html
#
# **Supported Platforms:** macos
# #### Attack Commands: Run with `bash`
# ```bash
# osascript -e 'tell app "System Preferences" to activate' -e 'tell app "System Preferences" to activate' -e 'tell app "System Preferences" to display dialog "Software Update requires that you type your password to apply changes." & return & return default answer "" with icon 1 with hidden answer with title "Software Update"'
# ```
Invoke-AtomicTest T1056.002 -TestNumbers 1
# ### Atomic Test #2 - PowerShell - Prompt User for Password
# Prompt User for Password (Local Phishing) as seen in Stitch RAT. Upon execution, a window will appear for the user to enter their credentials.
#
# Reference: https://github.com/nathanlopez/Stitch/blob/master/PyLib/askpass.py
#
# **Supported Platforms:** windows
# #### Attack Commands: Run with `powershell`
# ```powershell
# # Creates GUI to prompt for password. Expect long pause before prompt is available.
# $cred = $host.UI.PromptForCredential('Windows Security Update', '',[Environment]::UserName, [Environment]::UserDomainName)
# # Using write-warning to allow the message to show on the console, as echo and other similar commands are not visible from the Invoke-AtomicTest framework.
# write-warning $cred.GetNetworkCredential().Password
# ```
Invoke-AtomicTest T1056.002 -TestNumbers 2
# ## Detection
# Monitor process execution for unusual programs as well as malicious instances of [Command and Scripting Interpreter](https://attack.mitre.org/techniques/T1059) that could be used to prompt users for credentials.
#
# Inspect and scrutinize input prompts for indicators of illegitimacy, such as non-traditional banners, text, timing, and/or sources.
| playbook/tactics/credential-access/T1056.002.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# SOP011 - Set kubernetes configuration context
# =============================================
#
# Description
# -----------
#
# Set the kubernetes configuration to use.
#
# NOTE: To view available contexts use the following TSG:
#
# - [TSG010 - Get configuration
# contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb)
#
# Steps
# -----
#
# ### Parameters
# + tags=["parameters"]
context_name = None
# -
# ### Common functions
#
# Define helper functions used in this notebook.
# + tags=["hide_input"]
# Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows
import sys
import os
import re
import json
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
retry_hints = {} # Output in stderr known to be transient, therefore automatically retry
error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {} # The SOP to help install the executable if it cannot be found
first_run = True
rules = None
debug_logging = False
def run(cmd, return_output=False, no_output=False, retry_count=0):
"""Run shell command, stream stdout, print stderr and optionally return output
NOTES:
1. Commands that need this kind of ' quoting on Windows e.g.:
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}
Need to actually pass in as '"':
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}
The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:
`iter(p.stdout.readline, b'')`
The shlex.split call does the right thing for each platform, just use the '"' pattern for a '
"""
MAX_RETRIES = 5
output = ""
retry = False
global first_run
global rules
if first_run:
first_run = False
rules = load_rules()
# When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
#
# ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
#
if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
cmd = cmd.replace("\n", " ")
# shlex.split is required on bash and for Windows paths with spaces
#
cmd_actual = shlex.split(cmd)
# Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
#
user_provided_exe_name = cmd_actual[0].lower()
# When running python, use the python in the ADS sandbox ({sys.executable})
#
if cmd.startswith("python "):
cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)
# On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
# with:
#
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
#
# Setting it to a default value of "en_US.UTF-8" enables pip install to complete
#
if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
os.environ["LC_ALL"] = "en_US.UTF-8"
# When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
#
if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")
# To aid supportabilty, determine which binary file will actually be executed on the machine
#
which_binary = None
# Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
# get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
# of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
# always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
# look for the 2nd installation of CURL in the path)
if platform.system() == "Windows" and cmd.startswith("curl "):
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, "curl.exe")
if os.path.exists(p) and os.access(p, os.X_OK):
if p.lower().find("system32") == -1:
cmd_actual[0] = p
which_binary = p
break
# Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
# seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
#
# NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
#
if which_binary == None:
which_binary = shutil.which(cmd_actual[0])
if which_binary == None:
if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
else:
cmd_actual[0] = which_binary
start_time = datetime.datetime.now().replace(microsecond=0)
print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
print(f" cwd: {os.getcwd()}")
# Command-line tools such as CURL and AZDATA HDFS commands output
# scrolling progress bars, which causes Jupyter to hang forever, to
# workaround this, use no_output=True
#
# Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait
#
wait = True
try:
if no_output:
p = Popen(cmd_actual)
else:
p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
with p.stdout:
for line in iter(p.stdout.readline, b''):
line = line.decode()
if return_output:
output = output + line
else:
if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
regex = re.compile(' "(.*)"\: "(.*)"')
match = regex.match(line)
if match:
if match.group(1).find("HTML") != -1:
display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
else:
display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))
wait = False
break # otherwise infinite hang, have not worked out why yet.
else:
print(line, end='')
if rules is not None:
apply_expert_rules(line)
if wait:
p.wait()
    except FileNotFoundError as e:
        if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
            display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))
        raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e
exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()
if not no_output:
for line in iter(p.stderr.readline, b''):
try:
line_decoded = line.decode()
except UnicodeDecodeError:
# NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
#
# \xa0
#
# For example see this in the response from `az group create`:
#
# ERROR: Get Token request returned http error: 400 and server
# response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
# The refresh token has expired due to inactivity.\xa0The token was
# issued on 2018-10-25T23:35:11.9832872Z
#
# which generates the exception:
#
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
#
print("WARNING: Unable to decode stderr line, printing raw bytes:")
print(line)
line_decoded = ""
pass
else:
# azdata emits a single empty line to stderr when doing an hdfs cp, don't
# print this empty "ERR:" as it confuses.
#
if line_decoded == "":
continue
print(f"STDERR: {line_decoded}", end='')
if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
exit_code_workaround = 1
# inject HINTs to next TSG/SOP based on output in stderr
#
if user_provided_exe_name in error_hints:
for error_hint in error_hints[user_provided_exe_name]:
if line_decoded.find(error_hint[0]) != -1:
display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))
# apply expert rules (to run follow-on notebooks), based on output
#
if rules is not None:
apply_expert_rules(line_decoded)
# Verify if a transient error, if so automatically retry (recursive)
#
if user_provided_exe_name in retry_hints:
for retry_hint in retry_hints[user_provided_exe_name]:
if line_decoded.find(retry_hint) != -1:
if retry_count < MAX_RETRIES:
print(f"RETRY: {retry_count} (due to: {retry_hint})")
retry_count = retry_count + 1
output = run(cmd, return_output=return_output, retry_count=retry_count)
if return_output:
return output
else:
return
elapsed = datetime.datetime.now().replace(microsecond=0) - start_time
# WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
# don't wait here, if success known above
#
if wait:
if p.returncode != 0:
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
else:
if exit_code_workaround !=0 :
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')
print(f'\nSUCCESS: {elapsed}s elapsed.\n')
if return_output:
return output
def load_json(filename):
"""Load a json file from disk and return the contents"""
with open(filename, encoding="utf8") as json_file:
return json.load(json_file)
def load_rules():
"""Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable"""
# Load this notebook as json to get access to the expert rules in the notebook metadata.
#
try:
j = load_json("sop011-set-kubernetes-context.ipynb")
except:
pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename?
else:
if "metadata" in j and \
"azdata" in j["metadata"] and \
"expert" in j["metadata"]["azdata"] and \
"expanded_rules" in j["metadata"]["azdata"]["expert"]:
rules = j["metadata"]["azdata"]["expert"]["expanded_rules"]
rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first.
# print (f"EXPERT: There are {len(rules)} rules to evaluate.")
return rules
def apply_expert_rules(line):
"""Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so
inject a 'HINT' to the follow-on SOP/TSG to run"""
global rules
for rule in rules:
notebook = rule[1]
cell_type = rule[2]
output_type = rule[3] # i.e. stream or error
output_type_name = rule[4] # i.e. ename or name
output_type_value = rule[5] # i.e. SystemExit or stdout
details_name = rule[6] # i.e. evalue or text
expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it!
if debug_logging:
print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.")
if re.match(expression, line, re.DOTALL):
if debug_logging:
print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook))
match_found = True
display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))
print('Common functions defined successfully.')
# Hints for binary (transient fault) retry, (known) error and install guide
#
retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond'], 'azdata': ['Endpoint sql-server-master does not exist', 'Endpoint livy does not exist', 'Failed to get state for cluster', 'Endpoint webhdfs does not exist', 'Adaptive Server is unavailable or does not exist', 'Error: Address already in use']}
error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']], 'azdata': [['azdata login', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['The token is expired', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Reason: Unauthorized', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Max retries exceeded with url: /api/v1/bdc/endpoints', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Look at the controller logs for more details', 'TSG027 - Observe cluster deployment', '../diagnose/tsg027-observe-bdc-create.ipynb'], ['provided port is already allocated', 'TSG062 - Get tail of all previous container logs for pods in BDC namespace', '../log-files/tsg062-tail-bdc-previous-container-logs.ipynb'], ['Create cluster failed since the existing namespace', 'SOP061 - Delete a big data cluster', '../install/sop061-delete-bdc.ipynb'], ['Failed to complete kube config setup', 'TSG067 - Failed to complete kube config setup', '../repair/tsg067-failed-to-complete-kube-config-setup.ipynb'], ['Error processing command: "ApiError', 'TSG110 - Azdata returns ApiError', '../repair/tsg110-azdata-returns-apierror.ipynb'], ['Error processing command: "ControllerError', 'TSG036 - Controller logs', '../log-analyzers/tsg036-get-controller-logs.ipynb'], ['ERROR: 500', 'TSG046 - Knox gateway logs', '../log-analyzers/tsg046-get-knox-logs.ipynb'], ['Data source name not found and no default driver specified', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ["Can't open lib 'ODBC Driver 17 for SQL Server", 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Control plane upgrade failed. Failed to upgrade controller.', 'TSG108 - View the controller upgrade config map', '../diagnose/tsg108-controller-failed-to-upgrade.ipynb']]}
install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb'], 'azdata': ['SOP063 - Install azdata CLI (using package manager)', '../install/sop063-packman-install-azdata.ipynb']}
# -
# ### List available contexts
if context_name is None:
contexts = run('kubectl config get-contexts --output name', return_output=True)
    contexts = contexts.split("\n")[:-1]
counter = 0
for context in contexts:
print(f'{counter}. {context}')
counter += 1
else:
print(f'context_name: {context_name}')
# ### Select a context (if not set as a parameter)
# +
if context_name is None:
context_name = contexts[5] # <-- select context here (set ordinal)
print(f'context_name: {context_name}')
# -
# ### Log out using azdata
#
# To avoid a situation where the `Kubernetes` context is for a cluster
# which is not hosting the Big Data Cluster `azdata` currently logged
# into.
run('azdata logout')
# ### Set the kubernetes configuration to use
run(f'kubectl config use-context {context_name}')
print('Notebook execution complete.')
# Related
# -------
#
# - [TSG010 - Get configuration
# contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb)
| Big-Data-Clusters/CU4/Public/content/common/sop011-set-kubernetes-context.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from ContentUtil import ContentUtil
import pandas as pd
import numpy as np
from sklearn.utils import shuffle
ctl = ContentUtil()
AI_data, Not_AI_data = ctl.loadData("datasets/AI/", "datasets/NOT/")
AI = np.array(AI_data).reshape(-1,1)
Not_AI = np.array(Not_AI_data).reshape(-1,1)
data = np.concatenate((AI, Not_AI), axis=0)
label = ["AI"]*AI.shape[0]+["NOT"]*Not_AI.shape[0]
label = np.array(label).reshape(-1,1)
dataset = np.concatenate((data,label),axis=1)
df = pd.DataFrame(dataset,columns=["data","label"])
dfshf = shuffle(df).reset_index(drop=True)
dfshf.head()
df["label"].value_counts().plot(kind='bar',title="Distribution of Dataset")
df["length"] = df["data"].apply(lambda x:len(x.strip().split()))
df["length"].plot.hist(bins=[2000,4000,6000,8000,10000,12000],title="Distribution of text length")
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
stopwords = ctl.getStopWords()
def preprocessing(text):
stemmer = PorterStemmer()
tokens = word_tokenize(str(text))
newtext = ""
for w in tokens:
if w not in stopwords and len(w)>1:
newtext = newtext + " " + stemmer.stem(w)
return newtext
df["tokens"]=df["data"].apply(preprocessing)
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectKBest,chi2
vectorizer = TfidfVectorizer(analyzer='word',max_features=10000,ngram_range=(1, 2),stop_words=stopwords)
tfidf = vectorizer.fit_transform(df['tokens'])
word_reduced = SelectKBest(chi2,k=5000).fit_transform(tfidf, label)
importance = np.argsort(np.asarray(tfidf.sum(axis=0)).ravel())[::-1]
tfidf_feature_names = np.array(vectorizer.get_feature_names())
n_top = 200
topwords = tfidf_feature_names[importance[:n_top]]
topwords
X, y = word_reduced, df["label"]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=2021)
# +
import matplotlib.pyplot as plt
import itertools
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.get_cmap("Blues")):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
# print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
# -
def cv_all(word_reduced, label):
svm_cv_results = cross_validate(svm_clf, word_reduced, label,
scoring=['accuracy', 'recall_macro', 'precision_macro', 'f1_macro'], cv=10,
return_train_score=False,return_estimator=True)
nb_cv_results = cross_validate(nb_clf, word_reduced, label,
scoring=['accuracy', 'recall_macro', 'precision_macro', 'f1_macro'], cv=10,
return_train_score=False,return_estimator=True)
dt_cv_results = cross_validate(dt_clf, word_reduced, label,
scoring=['accuracy', 'recall_macro', 'precision_macro', 'f1_macro'], cv=10,
return_train_score=False,return_estimator=True)
knn_cv_results = cross_validate(knn_clf, word_reduced, label,
scoring=['accuracy', 'recall_macro', 'precision_macro', 'f1_macro'], cv=10,
return_train_score=False,return_estimator=True)
rfc_cv_results = cross_validate(rfc_clf, word_reduced, label,
scoring=['accuracy', 'recall_macro', 'precision_macro', 'f1_macro'], cv=10,
return_train_score=False,return_estimator=True)
return svm_cv_results, nb_cv_results, dt_cv_results, knn_cv_results, rfc_cv_results
from mlxtend.evaluate import paired_ttest_kfold_cv
from scipy.stats import friedmanchisquare
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_validate
from sklearn.model_selection import learning_curve
from sklearn.model_selection import train_test_split
from sklearn.model_selection import validation_curve
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils.multiclass import unique_labels
# build classifiers
seed = 2021
svm_clf = LinearSVC(random_state=seed, tol=1e-5)
nb_clf = MultinomialNB()
dt_clf = DecisionTreeClassifier(criterion='entropy', max_features='sqrt', random_state=seed)
knn_clf = KNeighborsClassifier(n_neighbors=7, n_jobs=2)
rfc_clf = RandomForestClassifier(random_state=seed)
classes = set(y)
classes
r1, r2, r3, r4, r5 = cv_all(X_train,y_train)
def predict_and_plot(clf,X_test,y_test,title):
ind = np.argmax(clf["test_accuracy"])
final_clf = clf['estimator'][ind]
y_pred = final_clf.predict(X_test)
cfm = confusion_matrix(y_test, y_pred)
plot_confusion_matrix(cfm,classes,title=title)
predict_and_plot(r1,X_test,y_test,"SVM Confusion Matrix")
predict_and_plot(r2,X_test,y_test,"Naive Bayes Confusion Matrix")
predict_and_plot(r3,X_test,y_test,"Decision Tree Confusion Matrix")
predict_and_plot(r4,X_test,y_test,"KNN Confusion Matrix")
predict_and_plot(r5,X_test,y_test,"RandomForest Confusion Matrix")
def plot_classification_report(clf, X, y):
class_names = unique_labels(y)
error_evaluation = cross_val_predict(estimator=clf, X=X, y=y, cv=10)
print(classification_report(y, error_evaluation, target_names=class_names))
plot_classification_report(svm_clf, X_train, y_train)
plot_classification_report(rfc_clf, X_train, y_train)
# the function of plotting learning curve
def plot_learning_curve(clf, X, y, clf_name,size):
train_sizes, train_scores, valid_scores = learning_curve(clf, X, y,
train_sizes=size, cv=10)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(valid_scores, axis=1)
test_scores_std = np.std(valid_scores, axis=1)
plt.grid()
plt.title("Learning curve for " + clf_name)
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.xlabel("Sample Size")
plt.ylabel("Accuracy")
plt.legend(loc="best")
plt.show()
plot_learning_curve(svm_clf, X_train, y_train, "SVM",[500,1000,1500,2000,2500])
plot_learning_curve(rfc_clf, X_train, y_train, "RFC",[500,1000,1500,2000,2500])
# #### Using a paired t-test to check whether the two classifiers differ significantly
def paired_ttest(X, y, clf1, clf2):
t, p = paired_ttest_kfold_cv(estimator1=clf1, estimator2=clf2, X=X, y=np.array(y))
print('t statistic: %.3f' % t)
print('p value: %.3f' % p)
# Given the null and alternative hypotheses: <br>
# H0 : the mean difference between the two classifiers is 0 <br>
# H1 : the mean difference between the two classifiers is not 0 <br>
print("t test for classfiers SVM and RFC")
paired_ttest(X, y, svm_clf, rfc_clf)
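# As a small sketch, the reported p-value can be turned into an explicit decision at the usual 5% significance level:
def decide(p_value, alpha=0.05):
    """Plain-language decision rule for the two-sided paired t-test above."""
    return "reject H0 (significant difference)" if p_value < alpha else "fail to reject H0"
decide(0.022)  # p-value reported by the paired t-test above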
# The p-value is 0.022 < 0.05, so we reject the null hypothesis H0. <br> Thus, the two classifiers differ significantly.
| textClassification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#sam cleaning/eda
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
df = pd.read_csv('../data/tess_oi.csv')
data_dict = pd.read_csv('../data/tess_oi_data_dictionary.csv')
df.head()
df.shape
data_dict
df.isna().sum()
df.dtypes
# -Imputing Nulls-
#dropping everything that has over half the dataset as null
df = df.dropna(axis = 1, thresh= 1271)
df.isna().sum()
#drop all error columns and limit columns
df = df.drop(columns = [column for column in df.columns if 'err' in column])
df = df.drop(columns = [column for column in df.columns if 'lim' in column])
df.isna().sum()
#pad the nulls out to 20, and impute the mean beyond that limit.
df = df.fillna(method = 'pad', limit = 20)
df = df.fillna(df.mean())
df.isna().sum()
df.to_csv('../data/cleaned.csv', index= False)
df.head()
df.dtypes
# +
#casual initial model to see how it compares to a baseline
# -
#turn the classification labels into numbers
tfop_dict = {'FP': 0, 'CP': 1, 'PC':1,'KP':1,'APC':0,'FA':0}
df['tfopwg_num'] = df['tfopwg_disp'].map(tfop_dict)
df['tfopwg_disp'].head()
df['tfopwg_num'].head()
to_drop = ['rowid','toi','toipfx','tid','ctoi_alias','tfopwg_disp','tfopwg_num','rastr','decstr','toi_created','rowupdate']
X = df.drop(columns = to_drop)
y = df['tfopwg_num']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 42)
ss = StandardScaler()
X_train_sc = ss.fit_transform(X_train)
X_test_sc = ss.transform(X_test)  # transform only: the scaler is fit on the training data to avoid leakage
model = LogisticRegression()
model.fit(X_train_sc, y_train)
X_train_sc.shape
baseline = np.random.randint(low = 0, high = 2, size = X_train_sc.shape[0])
print(model.score(X_train_sc, y_train))
print(model.score(X_test_sc, y_test))
print(model.score(X_train_sc, baseline))
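# The comparison above scores the model against randomly generated 0/1 labels. As an additional
# sketch, a more conventional baseline is the accuracy of always predicting the majority class:
majority_baseline = y_train.value_counts(normalize=True).max()
print(f'Majority-class baseline accuracy: {majority_baseline:.3f}')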
# +
#pretty good! Could be better, but I like those odds right out of the gate. We'll see in the other notebook if we can do better.
| code/sam-cleaning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# hashtags indicate notes about code; the code below imports a few packages we will need for this analysis
import pandas as pd
# import statsmodels.api as sm
# import pylab as pl
import numpy as np
# %matplotlib notebook
# -
# read the data in
df = pd.read_csv("binary.csv")
# +
print(df.head())
df.columns = ["admit", "gre", "gpa", "prestige"]
df.columns
print(df.describe())
# take a look at the standard deviation of each column
df.std()
# frequency table cutting prestige and whether or not someone was admitted
pd.crosstab(df['admit'], df['prestige'], rownames=['admit'])
df.hist()
# -
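# A minimal sketch of the logit fit this notebook builds toward, assuming the standard UCLA
# admissions columns loaded above (admit, gre, gpa, prestige); prestige is dummy-coded first.
import statsmodels.api as sm
dummies = pd.get_dummies(df['prestige'], prefix='prestige', drop_first=True)
X = sm.add_constant(df[['gre', 'gpa']].join(dummies)).astype(float)
logit_model = sm.Logit(df['admit'], X).fit()
print(logit_model.summary())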
| logit-regression/logit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="EYY2wm7QNXDg" colab_type="code" colab={}
'''
Numpy Library
'''
import numpy as N
# + id="0uwBnw_7Nuwa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="f1e6791f-ee37-4c6e-f0bb-590a0697355a"
'''
Array Method in numpy
array() -> used to create multi dimensional arrays
'''
help(N.array)
# + id="3JvhDaLqOP97" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 86} outputId="6cf5cf50-ef1d-4e33-9b9a-e0cb3ee3e209"
'''
rank 1 array (1D Numpy array)
'''
ar = N.array([1,2,3,4,5])
print(ar)
print(type(ar))
print(ar[2])
print(ar[2:4])
# + id="wzta0dX5O_rz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="8ba9c8f8-a1ee-4f00-e70a-c6214491baaa"
'''
attributes and properties
Shape
-> used to check dimension of array
dtype
-> used to know datatype of array
'''
print(ar.shape)
print(ar.dtype)
# + id="rLbVnzSdRnh1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="44396280-9c43-4cec-99e1-a6612e490ba9"
'''
rank 2 matrix
'''
mat = N.array([[1,2,3,4],[5,6,7,8],[9,10,11,12]])
print(mat)
print(mat.shape)
print(mat.dtype)
# + id="oRyX_v5_SjHB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 225} outputId="343b63ee-72de-4f89-f376-b2756fa80b00"
'''
For loops with numpy arrays
'''
print("Without range method")
for i in ar:
print(i)
print("With range and len methods")
for i in range(len(ar)):
print(i,':',ar[i])
# + id="2LdrjnLITPfO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="4d848660-7628-457f-f4a8-9928c8397391"
'''
Built-in methods in NumPy
1. zeros
-> used to fill array with zeros (float datatype)
2. ones
-> used to fill array with ones (float datatype)
'''
ar = N.zeros(5)
print("One Dimensional Zeroes",ar,sep='\n')
mat = N.ones((2,2))
print("Two Dimensional Ones",mat,sep='\n')
# + id="Vu3QwQz2UurO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 86} outputId="e0ac2760-2f47-4291-a97b-9298f4018ff2"
'''
Built-in methods in NumPy
3. full
-> used to fill an array with constant value
-> syntax : full((dimension),constantvalue)
'''
ar = N.full(5,6)
mat = N.full((3,3),'abc')
print(ar)
print(mat)
# + id="8BLDbPWjXWZF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 260} outputId="50d1a38d-cad3-4b0f-f7e0-e3a444717635"
'''
Built-in methods in NumPy
4. identity
-> used to create an identity matrix
5. astype
-> used to change datatype
'''
mat = N.identity(7)
print(mat)
ar = mat.astype(int)
print(ar)
# + id="9hsDikmGbpvi" colab_type="code" outputId="28cbdfa0-410e-497a-d25f-b3223c90c241" colab={"base_uri": "https://localhost:8080/", "height": 156}
'''
Built-in methods in NumPy
6. arange
-> used to generate a range of values, similar to range()
7. linspace
-> returns evenly spaced numbers over a given interval
-> by default 50 samples are generated in the given range
'''
print(N.arange(10))
print(N.arange(1,11))
print(N.arange(2,50,2))
print(N.arange((10),dtype='float'))
print(N.linspace(1,50))
print(N.linspace(1,100,num=5))
# + id="81c-286Hfc23" colab_type="code" outputId="e2f1009f-abfe-4308-c2b7-11682beac2f4" colab={"base_uri": "https://localhost:8080/", "height": 260}
'''
Built-in methods in NumPy
8. reshape
-> changes the dimension of an array
'''
ar = N.array([1,2,3,4,5,6,7,8,9])
print(ar.reshape(3,3))
print(N.arange(1,28).reshape(3,3,3))
# + id="yE7zUJxPV1Yt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 86} outputId="24fef93b-b63e-40fc-b1e0-b1caa10fafca"
'''
Built-in methods in the numpy.random module, commonly used with NumPy
1. random
-> returns random floats in the interval [0, 1); with no size argument a single float is returned
2. randint
-> returns random integers within a given range (use `size` to get an array)
'''
ar = N.random.random()
mat = N.random.randint(1,101, size=(3,3))
print(ar)
print(mat)
# + colab_type="code" id="HXUlKttGbe8X" colab={"base_uri": "https://localhost:8080/", "height": 86} outputId="9797409f-1384-4659-9129-253d9b1533ee"
'''
Datatypes in Numpy 1
'''
iar = N.array([1,2,3,4,5])
far = N.array([1.,2.,3.,4.,5.])
car = N.array(['a','b','c','d','e'])
sar = N.array(['abc','bcd','cde'])
print(iar.dtype)
print(far.dtype)
print(car.dtype)
print(sar.dtype)
# + id="z5fcilNZZv8U" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 139} outputId="31674fea-0482-4949-9daa-75a575a3c99b"
'''
Datatypes in Numpy 2
'''
iar = N.array([1,2,3,4,5],dtype='float32')
far = N.array([10.,20.,30.,40.,50.],dtype=int)  # N.int was removed in recent NumPy; the builtin int works the same here
ar = N.zeros((2,2),dtype=int)
print(iar.dtype)
print(iar)
print(far.dtype)
print(far)
print(ar.dtype)
print(ar)
# + id="P8UvkuNdaJp7" colab_type="code" outputId="9846e705-2ca5-41fd-c7c2-4eefa5511429" colab={"base_uri": "https://localhost:8080/", "height": 156}
'''
Numpy Broadcasting
rules applied when performing element-wise operations on arrays:
1. in each dimension the sizes must match, or
2. one of the sizes must be 1 (that axis is then stretched to match the other)
'''
a = N.array([1,2,3])
b = N.array([2,2,2])
c = N.array([3])
print(a*b)
print(a*c)
m1 = N.array([[1,2],[3,4]])
m2 = N.array([[2,2],[2,2]])
print(m1+m2)
print(m2-c)
m3 = N.array([10,100])
print(m1*m3)
| ml_course/ipynbfiles/NumpyLibrary.ipynb |
// -*- coding: utf-8 -*-
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cpp
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: C++17
// language: C++17
// name: xeus-cling-cpp17
// ---
// # References
// - <NAME>. *C++ for Geniuses*. URL: http://mypccourse.com/cosc1436/book/
// - Bjarne Stroustrup's homepage. URL: http://www.stroustrup.com/
// - MinGW. URL: https://nuwen.net/mingw.html
// - C++. URL: http://www.stroustrup.com/C++.html
| notebooks/programacao-i/referenciais.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# For Development and debugging:
# Reload modul without restarting the kernel
# %load_ext autoreload
# %autoreload 2
# +
import pyreadr
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
import pandas as pd
pd.options.display.max_columns = None
import os
import sys
import matplotlib.pyplot as plt
import json
import copy
import seaborn as sns
sns.set_theme(style="darkgrid")
# Add EXTERNAL_LIBS_PATH to sys paths (for loading libraries)
EXTERNAL_LIBS_PATH = '/home/hhughes/Documents/Master_Thesis/Project/workspace/libs'
sys.path.insert(1, EXTERNAL_LIBS_PATH)
# Load custom libs
import NN_interpretability as nn_inter
import Data_augmentation as data_aug
import tfds_utils
# Disable GPU
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# +
# Set parameters
params = {}
params['base_path'] = '/data/Master_Thesis_data/Raw_data'
params['tf_ds_name'] = 'mpp_ds_normal_dmso_z_score'
params['local_tf_datasets'] = '/data/Master_Thesis_data/datasets/tensorflow_datasets'
# Overlapped cells
black_list = [277420, 195486]
# -
# # Load Body clusters data
temp_path = os.path.join(params['base_path'], 'features_background_subtracted_mean_normalised.rds')
temp_data = pyreadr.read_r(temp_path)
body_clusters_df = temp_data[None]
# # Load TFDS
# +
# Path where tf datasets are
dataset, ds_info = tfds.load(
name=params['tf_ds_name'],
data_dir=params['local_tf_datasets'],
# If False, returns a dictionary with all the features
as_supervised=False,
shuffle_files=False,
with_info=True)
# Load TFDS metadata
tfds_metadata = tfds_utils.Costum_TFDS_metadata().load_metadata(ds_info.data_dir)
tfds_metadata.keys()
# Load splits
train_data, val_data, test_data = dataset['train'], dataset['validation'], dataset['test']
# -
ds_info.splits
ds_info.features
metadata_df = tfds_metadata['metadata_df']
mask = metadata_df.mapobject_id_cell.isin(black_list)
metadata_df = metadata_df[~mask].copy()
channels_df = tfds_metadata['channels_df']
channel_names = [c.split("_")[1:] for c in channels_df.name.values]
channel_names = ["_".join(c) for c in channel_names]
channels_df['channel_name'] = channel_names
channels_df
# # Add Body cluster data to metadata
# +
column_names = ['Nuclei_Intensity_mean_00_EU',
'PML_SP100_Bodies_Total_Nuclei_Mean_Intensity_mean_11_PML',
'PML_SP100_Bodies_Total_Nuclei_Mean_Intensity_mean_20_SP100',
'NCL_Bodies_Cells_Mean_Intensity_mean_21_NCL'
]
temp_df = body_clusters_df[['mapobject_id']+column_names].copy()
temp_df.columns = ['mapobject_id_cell']+column_names
metadata_df = metadata_df.merge(temp_df,
left_on='mapobject_id_cell',
right_on='mapobject_id_cell',
how='left')
metadata_df
# -
# # Analysis
# +
mean_EU_channel = metadata_df['00_EU_avg']
mean_EU = metadata_df['Nuclei_Intensity_mean_00_EU'].values
mean_SP100 = metadata_df['PML_SP100_Bodies_Total_Nuclei_Mean_Intensity_mean_20_SP100'].values
mean_PML = metadata_df['PML_SP100_Bodies_Total_Nuclei_Mean_Intensity_mean_11_PML'].values
mean_NCL = metadata_df['NCL_Bodies_Cells_Mean_Intensity_mean_21_NCL'].values
EU_SP100_corr = np.corrcoef(mean_EU, mean_SP100)[0,1]
chEU_SP100_corr = np.corrcoef(mean_EU_channel, mean_SP100)[0,1]
EU_PML_corr = np.corrcoef(mean_EU, mean_PML)[0,1]
SP100_PML_corr = np.corrcoef(mean_SP100, mean_PML)[0,1]
EU_NCL_corr = np.corrcoef(mean_EU, mean_NCL)[0,1]
print(EU_SP100_corr, chEU_SP100_corr, EU_PML_corr, SP100_PML_corr, EU_NCL_corr)
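# Added cross-check (not part of the original analysis): the same pairwise Pearson
# correlations can be read from a single matrix built from the columns above.
corr_df = pd.DataFrame({'EU': mean_EU, 'EU_channel': mean_EU_channel.values,
                        'SP100': mean_SP100, 'PML': mean_PML, 'NCL': mean_NCL}).corr()
print(corr_df)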
# -
cell_cycles = ['G1', 'S', 'G2']
for cc in cell_cycles:
mask = metadata_df['cell_cycle'] == cc
temp_df = metadata_df[mask]
mean_EU = temp_df['Nuclei_Intensity_mean_00_EU'].values
mean_SP100 = temp_df['PML_SP100_Bodies_Total_Nuclei_Mean_Intensity_mean_20_SP100'].values
EU_SP100_corr = np.corrcoef(mean_EU, mean_SP100)[0,1]
    print('Cell_cycle: {}, {}'.format(cc, EU_SP100_corr))
plt.figure(figsize=(8,8))
sns.scatterplot(data=temp_df,
x='Nuclei_Intensity_mean_00_EU',
y='PML_SP100_Bodies_Total_Nuclei_Mean_Intensity_mean_20_SP100',
hue='cell_cycle'
)
plt.ylabel('mean_SP100')
plt.xlabel('mean_EU')
sns.scatterplot(data=metadata_df,
x='Nuclei_Intensity_mean_00_EU',
y='PML_SP100_Bodies_Total_Nuclei_Mean_Intensity_mean_20_SP100',
hue='cell_cycle'
)
# # correlation between EU and NCL
[col for col in body_clusters_df.columns if ('SON' in col) and ('mean' in col)]
| workspace/Interpretability/SP100_correlation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
sys.path.insert(0, '../')
# +
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import torch
from dataset import get_sets, imshow
from capsule.net import CapsNet
# -
# ## Load model
capsnet = CapsNet()
capsnet.load_state_dict(
torch.load(open('../models/capsnet_state.pth', 'rb'), map_location=torch.device('cpu'))
)
train_set, test_set = get_sets()
image, target = test_set[15]
imshow(image)
print('Target:', target)
output, norm, reconstruction = capsnet(image.unsqueeze(0))
imshow(reconstruction[0].detach().view(28, 28).numpy())
# ## 16 Vec
imshow(image)
output, norm, reconstruction = capsnet(image.unsqueeze(0))
output = output[0].detach()
f, axarr = plt.subplots(5,5)
for feature in range(10, 15):
for i in range(5):
a = output.clone()
a[5][feature] += i*2
axarr[feature-10, i].imshow(capsnet.decoder(a.unsqueeze(0)).detach().view(28, 28).numpy(), cmap='binary')
# ## Custom tests
image = np.array(Image.open('./tests/left_45.png').convert('LA').getdata(band=1)).reshape(28, 28)
input = torch.tensor(image, dtype=torch.float32).unsqueeze(0).unsqueeze(0)
imshow(image)
# +
output, norm, reconstruction = capsnet(input)
imshow(reconstruction[0].detach().view(28, 28).numpy())
norm.argmax().item()
# -
| mnist/playground.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# #### **Title**: Segments Element
#
# **Dependencies** Bokeh
#
# **Backends** [Bokeh](./Segments.ipynb), [Matplotlib](../matplotlib/Segments.ipynb)
import numpy as np
import holoviews as hv
from holoviews import dim
hv.extension('bokeh')
# `Segments` visualizes a collection of line segments, each starting at a position (`x0`, `y0`) and ending at a position (`x1`, `y1`). To specify it, we hence need four key dimensions, listed in the order (`x0`, `y0`, `x1`, `y1`), and an arbitrary number of value dimensions as attributes to each line segment.
# ##### Basic usage
# Declare mock data:
event = ['A', 'B']
data = dict(
start=[np.datetime64('1999'), np.datetime64('2001')],
end=[np.datetime64('2010'), np.datetime64('2020')],
start_event = event,
end_event = event
)
# Define the `Segments`:
seg = hv.Segments(data, [hv.Dimension('start', label='Year'),
hv.Dimension('start_event', label='Event'), 'end', 'end_event'])
# Display and style the Element:
seg.opts(color='k', line_width=10)
# ##### A fractal tree
# +
from functools import reduce
def tree(N):
"""
Generates fractal tree up to branch N.
"""
# x0, y0, x1, y1, level
branches = [(0, 0, 0, 1)]
theta = np.pi/5 # branching angle
r = 0.5 # length ratio between successive branches
# Define function to grow successive branches given previous branch and branching angle
angle = lambda b: np.arctan2(b[3]-b[1], b[2]-b[0])
length = lambda b: np.sqrt((b[3]-b[1])**2 + (b[2]-b[0])**2)
grow = lambda b, ang: (b[2], b[3],
b[2] + r*length(b)*np.cos(angle(b)+ang),
b[3] + r*length(b)*np.sin(angle(b)+ang))
ctr = 1
while ctr<=N:
yield branches
ctr += 1
branches = [[grow(b, theta), grow(b, -theta)] for b in branches]
branches = reduce(lambda i, j: i+j, branches)
t = reduce(lambda i, j: i+j, tree(14))
data = np.array(t[1:])
# -
# Declare a `Segments` Element and add an additional value dimension `c` that we can use for styling:
s = hv.Segments(np.c_[data, np.arange(len(data))], ['x', 'y', 'x1', 'y1'], 'c')
# Now, let's style the Element into a digital broccoli painting:
s.opts(xaxis=None, yaxis=None, height=400, width=400,toolbar='above',
color=np.log10(1+dim('c')), cmap='Greens', line_width=15)
# ##### Cantor set
# +
def cantor(N):
"""
Generates a Cantor set up to iteration N, cutting out the middle 9th of each interval
at each step.
"""
y = 0
intervals = [(0, 1, y)]
while y<=N:
yield intervals
dx = (intervals[0][1]-intervals[0][0])/9*4
y += 1
intervals = [[(i[0], i[0]+dx, y), (i[1]-dx, i[1], y)] for i in intervals]
intervals = reduce(lambda i, j: i+j, intervals)
cl = reduce(lambda i, j: i+j, cantor(12))
x0, x1, y = zip(*cl)
data = np.array(cl)
# -
s = hv.Segments((x0, y, x1, y, y), vdims=['c'])
s.opts(xaxis=None, yaxis=None, height=160, width=500,
toolbar='above', line_width=8, color=dim('c'), cmap='fire_r')
| examples/reference/elements/bokeh/Segments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Pyolite
# language: python
# name: python
# ---
# # Altair in `JupyterLite`
#
# **Altair** is a declarative statistical visualization library for Python.
#
# Most of the examples below are from: https://altair-viz.github.io/gallery
# ## Import the dependencies:
# code snippet below from @lrowe: https://github.com/jupyterlite/jupyterlite/issues/110#issuecomment-850916083
import micropip
# Work around https://github.com/pyodide/pyodide/issues/1614 which is now fixed in pyodide
await micropip.install('Jinja2')
micropip.PACKAGE_MANAGER.builtin_packages['jinja2'] = micropip.PACKAGE_MANAGER.builtin_packages['Jinja2']
# Last version of jsonschema before it added the pyrsistent dependency (native code, no wheel)
await micropip.install("https://files.pythonhosted.org/packages/77/de/47e35a97b2b05c2fadbec67d44cfcdcd09b8086951b331d82de90d2912da/jsonschema-2.6.0-py2.py3-none-any.whl")
await micropip.install("altair")
# ## Simple Bar Chart
# +
import altair as alt
import pandas as pd
source = pd.DataFrame({
'a': ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'],
'b': [28, 55, 43, 91, 81, 53, 19, 87, 52]
})
alt.Chart(source).mark_bar().encode(
x='a',
y='b'
)
# -
# ## Simple Heatmap
# +
import altair as alt
import numpy as np
import pandas as pd
# Compute x^2 + y^2 across a 2D grid
x, y = np.meshgrid(range(-5, 5), range(-5, 5))
z = x ** 2 + y ** 2
# Convert this grid to columnar data expected by Altair
source = pd.DataFrame({'x': x.ravel(),
'y': y.ravel(),
'z': z.ravel()})
alt.Chart(source).mark_rect().encode(
x='x:O',
y='y:O',
color='z:Q'
)
# -
# ## Install the Vega Dataset
await micropip.install('vega_datasets')
# ## Interactive Average
# +
import altair as alt
from vega_datasets import data
source = data.seattle_weather()
brush = alt.selection(type='interval', encodings=['x'])
bars = alt.Chart().mark_bar().encode(
x='month(date):O',
y='mean(precipitation):Q',
opacity=alt.condition(brush, alt.OpacityValue(1), alt.OpacityValue(0.7)),
).add_selection(
brush
)
line = alt.Chart().mark_rule(color='firebrick').encode(
y='mean(precipitation):Q',
size=alt.SizeValue(3)
).transform_filter(
brush
)
alt.layer(bars, line, data=source)
# -
# ## Locations of US Airports
# +
import altair as alt
from vega_datasets import data
airports = data.airports.url
states = alt.topo_feature(data.us_10m.url, feature='states')
# US states background
background = alt.Chart(states).mark_geoshape(
fill='lightgray',
stroke='white'
).properties(
width=500,
height=300
).project('albersUsa')
# airport positions on background
points = alt.Chart(airports).transform_aggregate(
latitude='mean(latitude)',
longitude='mean(longitude)',
count='count()',
groupby=['state']
).mark_circle().encode(
longitude='longitude:Q',
latitude='latitude:Q',
size=alt.Size('count:Q', title='Number of Airports'),
color=alt.value('steelblue'),
tooltip=['state:N','count:Q']
).properties(
title='Number of airports in US'
)
background + points
# -
| examples/pyolite - altair.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 2.6 Creating Chinese Documents
#
# Originally, LaTeX only provided an English compilation environment and had poor support for other scripts, Chinese included. As LaTeX's strengths in document preparation won over more and more users, it gradually gained support for compiling documents in many languages. In LaTeX Q&A communities we still regularly see questions about compiling Chinese documents; a decade or so ago, the question people cared about most was probably how to compile a Chinese document with LaTeX at all.
#
# ### 2.6.1 Using the `ctex` package or the `ctexart` document class
#
# Generally speaking, the simplest approach is to use the `ctex` package under the XeLaTeX engine, i.e. `\usepackage[UTF8]{ctex}`.
#
# [**Example 1**] Select the XeLaTeX compiler and use the `ctex` package to produce a simple Chinese document.
#
# ```tex
# \documentclass{article}
# \usepackage[UTF8]{ctex}
#
# \begin{document}
#
# 永和九年,岁在癸丑,暮春之初,会于会稽山阴之兰亭,修稧(禊)事也。群贤毕至,少长咸集。此地有崇山峻领(岭),茂林修竹;又有清流激湍,映带左右,引以为流觞曲水,列坐其次。虽无丝竹管弦之盛,一觞一咏,亦足以畅叙幽情。
#
# \end{document}
# ```
#
# The compiled result is shown in Figure 2.6.1.
#
# <p align="center">
# <img align="middle" src="graphics/zh_example1.png" width="600" />
# </p>
#
# <center><b>Figure 2.6.1</b> The compiled document</center>
#
# Of course, `ctex` also provides a dedicated document class named `ctexart`; using this document class is itself enough to produce a Chinese document.
#
# [**Example 2**] Select the XeLaTeX compiler and use the `ctexart` document class to produce a simple Chinese document.
#
# ```tex
# \documentclass{ctexart}
#
# \begin{document}
#
# 永和九年,岁在癸丑,暮春之初,会于会稽山阴之兰亭,修稧(禊)事也。群贤毕至,少长咸集。此地有崇山峻领(岭),茂林修竹;又有清流激湍,映带左右,引以为流觞曲水,列坐其次。虽无丝竹管弦之盛,一觞一咏,亦足以畅叙幽情。
#
# \end{document}
# ```
#
# The compiled result is shown in Figure 2.6.2.
#
# <p align="center">
# <img align="middle" src="graphics/zh_example2.png" width="580" />
# </p>
#
# <center><b>Figure 2.6.2</b> The compiled document</center>
#
# With the `ctexart` document class we can also choose the font family ourselves; the available font commands include regular script (`\kaishu`), Song (`\songti`), bold gothic (`\heiti`), and imitation Song (`\fangsong`).
#
# [**Example 3**] Select the XeLaTeX compiler, use the `ctexart` document class to produce a Chinese document, and set the font to regular script (`\kaishu`) and bold gothic (`\heiti`).
#
# ```tex
# \documentclass{ctexart}
#
# \begin{document}
#
# {\kaishu 【楷书】
#
# 永和九年,岁在癸丑,暮春之初,会于会稽山阴之兰亭,修稧(禊)事也。群贤毕至,少长咸集。此地有崇山峻领(岭),茂林修竹;又有清流激湍,映带左右,引以为流觞曲水,列坐其次。虽无丝竹管弦之盛,一觞一咏,亦足以畅叙幽情。}
#
# {\heiti 【黑体】
#
# 永和九年,岁在癸丑,暮春之初,会于会稽山阴之兰亭,修稧(禊)事也。群贤毕至,少长咸集。此地有崇山峻领(岭),茂林修竹;又有清流激湍,映带左右,引以为流觞曲水,列坐其次。虽无丝竹管弦之盛,一觞一咏,亦足以畅叙幽情。}
#
# \end{document}
# ```
#
# The compiled result is shown in Figure 2.6.3.
#
# <p align="center">
# <img align="middle" src="graphics/zh_example3.png" width="600" />
# </p>
#
# <center><b>Figure 2.6.3</b> The compiled document</center>
#
# ### 2.6.2 Using the `xeCJK` package
#
# The `xeCJK` package is a LaTeX package dedicated to typesetting Chinese; it is loaded with `\usepackage{xeCJK}`. In general, the standard document classes such as `article` all support the `xeCJK` package.
#
# [**Example 4**] Select the XeLaTeX compiler and use the `xeCJK` package with the `article` document class to produce a simple Chinese document.
#
# ```tex
# \documentclass{article}
# \usepackage{xeCJK}
#
# \begin{document}
#
# 永和九年,岁在癸丑,暮春之初,会于会稽山阴之兰亭,修稧(禊)事也。群贤毕至,少长咸集。此地有崇山峻领(岭),茂林修竹;又有清流激湍,映带左右,引以为流觞曲水,列坐其次。虽无丝竹管弦之盛,一觞一咏,亦足以畅叙幽情。
#
# \end{document}
# ```
#
# The compiled result is shown in Figure 2.6.4.
#
# <p align="center">
# <img align="middle" src="graphics/zh_example4.png" width="600" />
# </p>
#
# <center><b>Figure 2.6.4</b> The compiled document</center>
#
# ### 2.6.3 Using the `CJKutf8` package
#
# The `CJKutf8` package provides an environment for typesetting Chinese, namely `\begin{CJK}{UTF8}{<font>} \end{CJK}`, where many fonts can be chosen, for example Song (gbsn) and regular script (gkai). Note that `CJKutf8` is normally used with the pdfLaTeX compiler.
#
# [**Example 5**] Select the pdfLaTeX compiler and use the `\begin{CJK}{UTF8}{gkai} \end{CJK}` environment from the `CJKutf8` package to produce a simple Chinese document.
#
# ```tex
# \documentclass{article}
# \usepackage{CJKutf8}
#
# \begin{document}
#
# \begin{CJK}{UTF8}{gkai}
# 永和九年,岁在癸丑,暮春之初,会于会稽山阴之兰亭,修稧(禊)事也。群贤毕至,少长咸集。此地有崇山峻领(岭),茂林修竹;又有清流激湍,映带左右,引以为流觞曲水,列坐其次。虽无丝竹管弦之盛,一觞一咏,亦足以畅叙幽情。
# \end{CJK}
#
# \end{document}
# ```
#
# The compiled result is shown in Figure 2.6.5.
#
# <p align="center">
# <img align="middle" src="graphics/zh_example5.png" width="600" />
# </p>
#
# <center><b>Figure 2.6.5</b> The compiled document</center>
#
# ### 2.6.4 Producing Reports
#
# The open-source project [https://github.com/MCG-NKU/NSFC-LaTex](https://github.com/MCG-NKU/NSFC-LaTex) provides a LaTeX template for proposals to the National Natural Science Foundation of China (NSFC).
# ### 2.6.5 Recommended Resources
#
# Many LaTeX templates for Chinese documents are now available on Overleaf. Besides thesis templates, some Chinese academic journals, such as the Chinese Journal of Computers, also provide LaTeX templates for scientific papers.
#
# SCIENCE CHINA Information Sciences: [https://www.overleaf.com/project/5e99712a0916c900018d11af](https://www.overleaf.com/project/5e99712a0916c900018d11af)
#
# Chinese Journal of Computers: [https://www.overleaf.com/project/5f4793c256c62e0001f06d95](https://www.overleaf.com/project/5f4793c256c62e0001f06d95)
#
# Chinese thesis templates:
#
# Zhejiang University thesis template: [https://www.overleaf.com/project/610fa05007d0073d5405a04f](https://www.overleaf.com/project/610fa05007d0073d5405a04f)
#
# Wuhan University doctoral dissertation template: [https://www.overleaf.com/project/610fa09e07d007fa5605a1e9](https://www.overleaf.com/project/610fa09e07d007fa5605a1e9)
#
# Sun Yat-sen University graduate thesis template: [https://www.overleaf.com/project/610fa17307d007f2d305a388](https://www.overleaf.com/project/610fa17307d007f2d305a388)
#
# Nanjing University graduate thesis template: [https://www.overleaf.com/project/610fa1d007d00704c305a3eb](https://www.overleaf.com/project/610fa1d007d00704c305a3eb)
# [Previously] [**2.5 Some Basic Commands**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-2/section5.ipynb)
# ### License
#
# <div class="alert alert-block alert-danger">
# <b>This work is released under the MIT license.</b>
# </div>
| chapter-2/section6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <a href="http://cocl.us/DA0101EN_NotbookLink_Top">
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/Images/TopAd.png" width="750" align="center">
# </a>
# </div>
#
# <a href="https://www.bigdatauniversity.com"><img src = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/Images/CCLog.png" width = 300, align = "center"></a>
#
# <h1 align=center><font size = 5>Data Analysis with Python</font></h1>
# Exploratory Data Analysis
# <h3>Welcome!</h3>
# In this section, we will explore several methods to see if certain characteristics or features can be used to predict car price.
# <h2>Table of content</h2>
#
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <ol>
# <li><a href="#import_data">Import Data from Module</a></li>
# <li><a href="#pattern_visualization">Analyzing Individual Feature Patterns using Visualization</a></li>
# <li><a href="#discriptive_statistics">Descriptive Statistical Analysis</a></li>
# <li><a href="#basic_grouping">Basics of Grouping</a></li>
# <li><a href="#correlation_causation">Correlation and Causation</a></li>
# <li><a href="#anova">ANOVA</a></li>
# </ol>
#
# Estimated Time Needed: <strong>30 min</strong>
# </div>
#
# <hr>
# <h3>What are the main characteristics which have the most impact on the car price?</h3>
# <h2 id="import_data">1. Import Data from Module 2</h2>
# <h4>Setup</h4>
# Import libraries
import pandas as pd
import numpy as np
# load data and store in dataframe df:
# This dataset was hosted on IBM Cloud Object Storage; click <a href="https://cocl.us/cognitive_class_DA0101EN_objectstorage">HERE</a> for free storage
path='https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/automobileEDA.csv'
df = pd.read_csv(path)
df.head()
# <h2 id="pattern_visualization">2. Analyzing Individual Feature Patterns using Visualization</h2>
# To install Seaborn, we use pip, the Python package manager.
# %%capture
# ! pip install seaborn
# Import visualization packages "Matplotlib" and "Seaborn", don't forget about "%matplotlib inline" to plot in a Jupyter notebook.
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# <h4>How to choose the right visualization method?</h4>
# <p>When visualizing individual variables, it is important to first understand what type of variable you are dealing with. This will help us find the right visualization method for that variable.</p>
#
# list the data types for each column
print(df.dtypes)
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h3>Question #1:</h3>
#
# <b>What is the data type of the column "peak-rpm"? </b>
# </div>
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# float64
#
# -->
# for example, we can calculate the correlation between variables of type "int64" or "float64" using the method "corr":
df.corr()
# The diagonal elements are always one; we will study correlation more precisely, using the Pearson correlation, at the end of the notebook.
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #2: </h1>
#
# <p>Find the correlation between the following columns: bore, stroke,compression-ratio , and horsepower.</p>
# <p>Hint: if you would like to select those columns use the following syntax: df[['bore','stroke' ,'compression-ratio','horsepower']]</p>
# </div>
# Write your code below and press Shift+Enter to execute
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# df[['bore', 'stroke', 'compression-ratio', 'horsepower']].corr()
#
# -->
# <h2>Continuous numerical variables:</h2>
#
# <p>Continuous numerical variables are variables that may contain any value within some range. Continuous numerical variables can have the type "int64" or "float64". A great way to visualize these variables is by using scatterplots with fitted lines.</p>
#
# <p>To start understanding the (linear) relationship between an individual variable and the price, we can use "regplot", which plots the scatterplot plus the fitted regression line for the data.</p>
# Let's see several examples of different linear relationships:
# <h4>Positive linear relationship</h4>
# Let's find the scatterplot of "engine-size" and "price"
# Engine size as potential predictor variable of price
sns.regplot(x="engine-size", y="price", data=df)
plt.ylim(0,)
# <p>As the engine-size goes up, the price goes up: this indicates a positive direct correlation between these two variables. Engine size seems like a pretty good predictor of price since the regression line is almost a perfect diagonal line.</p>
# We can examine the correlation between 'engine-size' and 'price' and see it's approximately 0.87
df[["engine-size", "price"]].corr()
# Highway mpg is a potential predictor variable of price
sns.regplot(x="highway-mpg", y="price", data=df)
# <p>As the highway-mpg goes up, the price goes down: this indicates an inverse/negative relationship between these two variables. Highway mpg could potentially be a predictor of price.</p>
# We can examine the correlation between 'highway-mpg' and 'price' and see it's approximately -0.704
df[['highway-mpg', 'price']].corr()
# <h3>Weak Linear Relationship</h3>
# Let's see if "Peak-rpm" as a predictor variable of "price".
sns.regplot(x="peak-rpm", y="price", data=df)
# <p>Peak rpm does not seem like a good predictor of the price at all since the regression line is close to horizontal. Also, the data points are very scattered and far from the fitted line, showing lots of variability. Therefore it is not a reliable variable.</p>
# We can examine the correlation between 'peak-rpm' and 'price' and see it's approximately -0.101616
df[['peak-rpm','price']].corr()
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question 3 a): </h1>
#
# <p>Find the correlation between x="stroke", y="price".</p>
# <p>Hint: if you would like to select those columns use the following syntax: df[["stroke","price"]] </p>
# </div>
# Write your code below and press Shift+Enter to execute
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# #The correlation is 0.0823, the non-diagonal elements of the table.
# #code:
# df[["stroke","price"]].corr()
#
# -->
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1>Question 3 b):</h1>
#
# <p>Given the correlation results between "price" and "stroke" do you expect a linear relationship?</p>
# <p>Verify your results using the function "regplot()".</p>
# </div>
# Write your code below and press Shift+Enter to execute
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# #There is a weak correlation between the variable 'stroke' and 'price.' as such regression will not work well. We #can see this use "regplot" to demonstrate this.
#
# #Code:
# sns.regplot(x="stroke", y="price", data=df)
#
# -->
# <h3>Categorical variables</h3>
#
# <p>These are variables that describe a 'characteristic' of a data unit, and are selected from a small group of categories. The categorical variables can have the type "object" or "int64". A good way to visualize categorical variables is by using boxplots.</p>
# Let's look at the relationship between "body-style" and "price".
sns.boxplot(x="body-style", y="price", data=df)
# <p>We see that the distributions of price between the different body-style categories have a significant overlap, and so body-style would not be a good predictor of price. Let's examine "engine-location" and "price":</p>
sns.boxplot(x="engine-location", y="price", data=df)
# <p>Here we see that the distribution of price between these two engine-location categories, front and rear, are distinct enough to take engine-location as a potential good predictor of price.</p>
# Let's examine "drive-wheels" and "price".
# drive-wheels
sns.boxplot(x="drive-wheels", y="price", data=df)
# <p>Here we see that the distribution of price between the different drive-wheels categories differs; as such drive-wheels could potentially be a predictor of price.</p>
# <h2 id="discriptive_statistics">3. Descriptive Statistical Analysis</h2>
# <p>Let's first take a look at the variables by utilizing a description method.</p>
#
# <p>The <b>describe</b> function automatically computes basic statistics for all continuous variables. Any NaN values are automatically skipped in these statistics.</p>
#
# This will show:
# <ul>
# <li>the count of that variable</li>
# <li>the mean</li>
# <li>the standard deviation (std)</li>
# <li>the minimum value</li>
# <li>the quartiles (25%, 50% and 75%)</li>
# <li>the maximum value</li>
# </ul>
#
# We can apply the method "describe" as follows:
df.describe()
# The default setting of "describe" skips variables of type object. We can apply the method "describe" on the variables of type 'object' as follows:
df.describe(include=['object'])
# <h3>Value Counts</h3>
# <p>Value-counts is a good way of understanding how many units of each characteristic/variable we have. We can apply the "value_counts" method on the column 'drive-wheels'. Don’t forget the method "value_counts" only works on Pandas series, not Pandas Dataframes. As a result, we only include one bracket "df['drive-wheels']" not two brackets "df[['drive-wheels']]".</p>
df['drive-wheels'].value_counts()
# We can convert the series to a Dataframe as follows :
df['drive-wheels'].value_counts().to_frame()
# Let's repeat the above steps but save the results to the dataframe "drive_wheels_counts" and rename the column 'drive-wheels' to 'value_counts'.
drive_wheels_counts = df['drive-wheels'].value_counts().to_frame()
drive_wheels_counts.rename(columns={'drive-wheels': 'value_counts'}, inplace=True)
drive_wheels_counts
# Now let's rename the index to 'drive-wheels':
drive_wheels_counts.index.name = 'drive-wheels'
drive_wheels_counts
# We can repeat the above process for the variable 'engine-location'.
# engine-location as variable
engine_loc_counts = df['engine-location'].value_counts().to_frame()
engine_loc_counts.rename(columns={'engine-location': 'value_counts'}, inplace=True)
engine_loc_counts.index.name = 'engine-location'
engine_loc_counts.head(10)
# <p>After examining the value counts, engine location would not be a good predictor variable for the price: we only have three cars with a rear engine and 198 with the engine in the front, so this result is skewed. Thus, we are not able to draw any conclusions about the engine location.</p>
# <h2 id="basic_grouping">4. Basics of Grouping</h2>
# <p>The "groupby" method groups data by different categories. The data is grouped based on one or several variables and analysis is performed on the individual groups.</p>
#
# <p>For example, let's group by the variable "drive-wheels". We see that there are 3 different categories of drive wheels.</p>
df['drive-wheels'].unique()
# <p>If we want to know, on average, which type of drive wheel is most valuable, we can group "drive-wheels" and then average them.</p>
#
# <p>We can select the columns 'drive-wheels', 'body-style' and 'price', then assign it to the variable "df_group_one".</p>
df_group_one = df[['drive-wheels','body-style','price']]
# We can then calculate the average price for each of the different categories of data.
# grouping results
df_group_one = df_group_one.groupby(['drive-wheels'],as_index=False).mean()
df_group_one
# <p>From our data, it seems rear-wheel drive vehicles are, on average, the most expensive, while 4-wheel and front-wheel are approximately the same in price.</p>
#
# <p>You can also group with multiple variables. For example, let's group by both 'drive-wheels' and 'body-style'. This groups the dataframe by the unique combinations 'drive-wheels' and 'body-style'. We can store the results in the variable 'grouped_test1'.</p>
# grouping results
df_gptest = df[['drive-wheels','body-style','price']]
grouped_test1 = df_gptest.groupby(['drive-wheels','body-style'],as_index=False).mean()
grouped_test1
# <p>This grouped data is much easier to visualize when it is made into a pivot table. A pivot table is like an Excel spreadsheet, with one variable along the columns and another along the rows. We can convert the grouped dataframe to a pivot table using the method "pivot".</p>
#
# <p>In this case, we will leave the drive-wheel variable as the rows of the table, and pivot body-style to become the columns of the table:</p>
grouped_pivot = grouped_test1.pivot(index='drive-wheels',columns='body-style')
grouped_pivot
# <p>Often, we won't have data for some of the pivot cells. We can fill these missing cells with the value 0, but any other value could potentially be used as well. It should be mentioned that missing data is quite a complex subject and is an entire course on its own.</p>
grouped_pivot = grouped_pivot.fillna(0) #fill missing values with 0
grouped_pivot
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1>Question 4:</h1>
#
# <p>Use the "groupby" function to find the average "price" of each car based on "body-style" ? </p>
# </div>
# Write your code below and press Shift+Enter to execute
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# # grouping results
# df_gptest2 = df[['body-style','price']]
# grouped_test_bodystyle = df_gptest2.groupby(['body-style'],as_index= False).mean()
# grouped_test_bodystyle
#
# -->
# If you did not import "pyplot" let's do it again.
import matplotlib.pyplot as plt
# %matplotlib inline
# <h4>Variables: Drive Wheels and Body Style vs Price</h4>
# Let's use a heat map to visualize the relationship between Body Style vs Price.
#use the grouped results
plt.pcolor(grouped_pivot, cmap='RdBu')
plt.colorbar()
plt.show()
# <p>The heatmap plots the target variable (price) proportional to colour with respect to the variables 'drive-wheel' and 'body-style' in the vertical and horizontal axis respectively. This allows us to visualize how the price is related to 'drive-wheel' and 'body-style'.</p>
#
# <p>The default labels convey no useful information to us. Let's change that:</p>
# +
fig, ax = plt.subplots()
im = ax.pcolor(grouped_pivot, cmap='RdBu')
#label names
row_labels = grouped_pivot.columns.levels[1]
col_labels = grouped_pivot.index
#move ticks and labels to the center
ax.set_xticks(np.arange(grouped_pivot.shape[1]) + 0.5, minor=False)
ax.set_yticks(np.arange(grouped_pivot.shape[0]) + 0.5, minor=False)
#insert labels
ax.set_xticklabels(row_labels, minor=False)
ax.set_yticklabels(col_labels, minor=False)
#rotate label if too long
plt.xticks(rotation=90)
fig.colorbar(im)
plt.show()
# -
# <p>Visualization is very important in data science, and Python visualization packages provide great freedom. We will go more in-depth in a separate Python Visualizations course.</p>
#
# <p>The main question we want to answer in this module, is "What are the main characteristics which have the most impact on the car price?".</p>
#
# <p>To get a better measure of the important characteristics, we look at the correlation of these variables with the car price, in other words: how is the car price dependent on this variable?</p>
# <h2 id="correlation_causation">5. Correlation and Causation</h2>
# <p><b>Correlation</b>: a measure of the extent of interdependence between variables.</p>
#
# <p><b>Causation</b>: the relationship between cause and effect between two variables.</p>
#
# <p>It is important to know the difference between these two and that correlation does not imply causation. Determining correlation is much simpler than determining causation, as causation may require independent experimentation.</p>
# <h3>Pearson Correlation</h3>
# <p>The Pearson Correlation measures the linear dependence between two variables X and Y.</p>
# <p>The resulting coefficient is a value between -1 and 1 inclusive, where:</p>
# <ul>
# <li><b>1</b>: Total positive linear correlation.</li>
# <li><b>0</b>: No linear correlation, the two variables most likely do not affect each other.</li>
# <li><b>-1</b>: Total negative linear correlation.</li>
# </ul>
# <p>Pearson Correlation is the default method of the function "corr". Like before, we can calculate the Pearson Correlation of the 'int64' or 'float64' variables.</p>
df.corr()
# Sometimes we would like to know the significance of the correlation estimate.
# <b>P-value</b>:
# <p>What is this P-value? The P-value is the probability value that the correlation between these two variables is statistically significant. Normally, we choose a significance level of 0.05, which means that we are 95% confident that the correlation between the variables is significant.</p>
#
# By convention, when the
# <ul>
# <li>p-value is $<$ 0.001: we say there is strong evidence that the correlation is significant.</li>
# <li>the p-value is $<$ 0.05: there is moderate evidence that the correlation is significant.</li>
# <li>the p-value is $<$ 0.1: there is weak evidence that the correlation is significant.</li>
# <li>the p-value is $>$ 0.1: there is no evidence that the correlation is significant.</li>
# </ul>
# We can obtain this information using "stats" module in the "scipy" library.
from scipy import stats
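# As a small added helper (an illustration, not part of the original lab), we can encode the p-value convention above so the conclusions below are easy to restate:
def evidence_from_pvalue(p):
    """Map a p-value to the strength-of-evidence wording used in this notebook."""
    if p < 0.001:
        return "strong evidence"
    elif p < 0.05:
        return "moderate evidence"
    elif p < 0.1:
        return "weak evidence"
    return "no evidence"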
# <h3>Wheel-base vs Price</h3>
# Let's calculate the Pearson Correlation Coefficient and P-value of 'wheel-base' and 'price'.
pearson_coef, p_value = stats.pearsonr(df['wheel-base'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P =", p_value)
# <h5>Conclusion:</h5>
# <p>Since the p-value is $<$ 0.001, the correlation between wheel-base and price is statistically significant, although the linear relationship isn't extremely strong (~0.585)</p>
# <h3>Horsepower vs Price</h3>
# Let's calculate the Pearson Correlation Coefficient and P-value of 'horsepower' and 'price'.
pearson_coef, p_value = stats.pearsonr(df['horsepower'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P = ", p_value)
# <h5>Conclusion:</h5>
#
# <p>Since the p-value is $<$ 0.001, the correlation between horsepower and price is statistically significant, and the linear relationship is quite strong (~0.809, close to 1)</p>
# <h3>Length vs Price</h3>
#
# Let's calculate the Pearson Correlation Coefficient and P-value of 'length' and 'price'.
pearson_coef, p_value = stats.pearsonr(df['length'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P = ", p_value)
# <h5>Conclusion:</h5>
# <p>Since the p-value is $<$ 0.001, the correlation between length and price is statistically significant, and the linear relationship is moderately strong (~0.691).</p>
# <h3>Width vs Price</h3>
# Let's calculate the Pearson Correlation Coefficient and P-value of 'width' and 'price':
pearson_coef, p_value = stats.pearsonr(df['width'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P =", p_value )
# ##### Conclusion:
#
# Since the p-value is < 0.001, the correlation between width and price is statistically significant, and the linear relationship is quite strong (~0.751).
# ### Curb-weight vs Price
# Let's calculate the Pearson Correlation Coefficient and P-value of 'curb-weight' and 'price':
pearson_coef, p_value = stats.pearsonr(df['curb-weight'], df['price'])
print( "The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P = ", p_value)
# <h5>Conclusion:</h5>
# <p>Since the p-value is $<$ 0.001, the correlation between curb-weight and price is statistically significant, and the linear relationship is quite strong (~0.834).</p>
# <h3>Engine-size vs Price</h3>
#
# Let's calculate the Pearson Correlation Coefficient and P-value of 'engine-size' and 'price':
pearson_coef, p_value = stats.pearsonr(df['engine-size'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P =", p_value)
# <h5>Conclusion:</h5>
#
# <p>Since the p-value is $<$ 0.001, the correlation between engine-size and price is statistically significant, and the linear relationship is very strong (~0.872).</p>
# <h3>Bore vs Price</h3>
# Let's calculate the Pearson Correlation Coefficient and P-value of 'bore' and 'price':
pearson_coef, p_value = stats.pearsonr(df['bore'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P = ", p_value )
# <h5>Conclusion:</h5>
# <p>Since the p-value is $<$ 0.001, the correlation between bore and price is statistically significant, but the linear relationship is only moderate (~0.521).</p>
# We can repeat the process for 'city-mpg' and 'highway-mpg':
# <h3>City-mpg vs Price</h3>
pearson_coef, p_value = stats.pearsonr(df['city-mpg'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P = ", p_value)
# <h5>Conclusion:</h5>
# <p>Since the p-value is $<$ 0.001, the correlation between city-mpg and price is statistically significant, and the coefficient of ~ -0.687 shows that the relationship is negative and moderately strong.</p>
# <h3>Highway-mpg vs Price</h3>
pearson_coef, p_value = stats.pearsonr(df['highway-mpg'], df['price'])
print( "The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P = ", p_value )
# ##### Conclusion:
# Since the p-value is < 0.001, the correlation between highway-mpg and price is statistically significant, and the coefficient of ~ -0.705 shows that the relationship is negative and moderately strong.
# <h2 id="anova">6. ANOVA</h2>
# <h3>ANOVA: Analysis of Variance</h3>
# <p>The Analysis of Variance (ANOVA) is a statistical method used to test whether there are significant differences between the means of two or more groups. ANOVA returns two parameters:</p>
#
# <p><b>F-test score</b>: ANOVA assumes the means of all groups are the same, calculates how much the actual means deviate from the assumption, and reports it as the F-test score. A larger score means there is a larger difference between the means.</p>
#
# <p><b>P-value</b>: the P-value tells us how statistically significant our calculated score value is.</p>
#
# <p>If our price variable is strongly correlated with the variable we are analyzing, expect ANOVA to return a sizeable F-test score and a small p-value.</p>
# <h3>Drive Wheels</h3>
# <p>Since ANOVA analyzes the difference between different groups of the same variable, the groupby function will come in handy. Because the ANOVA algorithm averages the data automatically, we do not need to take the average beforehand.</p>
#
# <p>To see whether different types of 'drive-wheels' impact 'price', we group the data.</p>
# Let's see if different types 'drive-wheels' impact 'price', we group the data.
grouped_test2=df_gptest[['drive-wheels', 'price']].groupby(['drive-wheels'])
grouped_test2.head(2)
df_gptest
# We can obtain the values of the method group using the method "get_group".
grouped_test2.get_group('4wd')['price']
# we can use the function 'f_oneway' in the module 'stats' to obtain the <b>F-test score</b> and <b>P-value</b>.
# +
# ANOVA
f_val, p_val = stats.f_oneway(grouped_test2.get_group('fwd')['price'], grouped_test2.get_group('rwd')['price'], grouped_test2.get_group('4wd')['price'])
print( "ANOVA results: F=", f_val, ", P =", p_val)
# -
# This is a great result, with a large F-test score showing a strong correlation and a P-value of almost 0 implying almost certain statistical significance. But does this mean all three tested groups are this highly correlated?
# #### Separately: fwd and rwd
# +
f_val, p_val = stats.f_oneway(grouped_test2.get_group('fwd')['price'], grouped_test2.get_group('rwd')['price'])
print( "ANOVA results: F=", f_val, ", P =", p_val )
# -
# Let's examine the other groups
# #### 4wd and rwd
# +
f_val, p_val = stats.f_oneway(grouped_test2.get_group('4wd')['price'], grouped_test2.get_group('rwd')['price'])
print( "ANOVA results: F=", f_val, ", P =", p_val)
# -
# <h4>4wd and fwd</h4>
# +
f_val, p_val = stats.f_oneway(grouped_test2.get_group('4wd')['price'], grouped_test2.get_group('fwd')['price'])
print("ANOVA results: F=", f_val, ", P =", p_val)
# -
# <h3>Conclusion: Important Variables</h3>
# <p>We now have a better idea of what our data looks like and which variables are important to take into account when predicting the car price. We have narrowed it down to the following variables:</p>
#
# Continuous numerical variables:
# <ul>
# <li>Length</li>
# <li>Width</li>
# <li>Curb-weight</li>
# <li>Engine-size</li>
# <li>Horsepower</li>
# <li>City-mpg</li>
# <li>Highway-mpg</li>
# <li>Wheel-base</li>
# <li>Bore</li>
# </ul>
#
# Categorical variables:
# <ul>
# <li>Drive-wheels</li>
# </ul>
#
# <p>As we now move into building machine learning models to automate our analysis, feeding the model with variables that meaningfully affect our target variable will improve our model's prediction performance.</p>
# <h1>Thank you for completing this notebook</h1>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
#
# <p><a href="https://cocl.us/DA0101EN_NotbookLink_Top_bottom"><img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/Images/BottomAd.png" width="750" align="center"></a></p>
# </div>
#
# <h3>About the Authors:</h3>
#
# This notebook was written by <a href="https://www.linkedin.com/in/mahdi-noorian-58219234/" target="_blank"><NAME> PhD</a>, <a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank"><NAME></a>, <NAME>, <NAME>, <NAME>, Parizad, <NAME> and <a href="https://www.linkedin.com/in/fiorellawever/" target="_blank"><NAME></a> and <a href=" https://www.linkedin.com/in/yi-leng-yao-84451275/ " target="_blank" >Yi Yao</a>.
#
# <p><a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank"><NAME></a> is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.</p>
# <hr>
# <p>Copyright © 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href="https://cognitiveclass.ai/mit-license/">MIT License</a>.</p>
| Data Analysis with python/DA0101EN-Review-Exploratory-Data-Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="ojm_6E9f9Kcf"
# # GRU 228
# * Operate on 16000 GenCode 34 seqs.
# * 5-way cross validation. Save best model per CV.
# * Report mean accuracy from final re-validation with best 5.
# * Use Adam with a learning-rate decay schedule.
# + id="hh6XplUvC0j0" outputId="68d518f7-848e-44a3-d601-e37e252f9a5a" colab={"base_uri": "https://localhost:8080/"}
NC_FILENAME='ncRNA.gc34.processed.fasta'
PC_FILENAME='pcRNA.gc34.processed.fasta'
DATAPATH=""
try:
from google.colab import drive
IN_COLAB = True
PATH='/content/drive/'
drive.mount(PATH)
DATAPATH=PATH+'My Drive/data/' # must end in "/"
NC_FILENAME = DATAPATH+NC_FILENAME
PC_FILENAME = DATAPATH+PC_FILENAME
except:
IN_COLAB = False
DATAPATH=""
EPOCHS=200
SPLITS=5
K=3
VOCABULARY_SIZE=4**K+1 # e.g. K=3 => 64 DNA K-mers + 'NNN'
EMBED_DIMEN=16
FILENAME='GRU228'
NEURONS=32
DROP=0.5
ACT="tanh"
# + id="VQY7aTj29Kch"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import StratifiedKFold
import tensorflow as tf
from tensorflow import keras
from keras.wrappers.scikit_learn import KerasRegressor
from keras.models import Sequential
from keras.layers import Bidirectional
from keras.layers import GRU
from keras.layers import Dense
from keras.layers import LayerNormalization
import time
dt='float32'
tf.keras.backend.set_floatx(dt)
# + [markdown] id="j7jcg6Wl9Kc2"
# ## Build model
# + id="qLFNO1Xa9Kc3"
def compile_model(model):
adam_default_learn_rate = 0.001
schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate = adam_default_learn_rate*10,
#decay_steps=100000, decay_rate=0.96, staircase=True)
decay_steps=10000, decay_rate=0.99, staircase=True)
# learn rate = initial_learning_rate * decay_rate ^ (step / decay_steps)
alrd = tf.keras.optimizers.Adam(learning_rate=schedule)
bc=tf.keras.losses.BinaryCrossentropy(from_logits=False)
print("COMPILE...")
#model.compile(loss=bc, optimizer=alrd, metrics=["accuracy"])
model.compile(loss=bc, optimizer="adam", metrics=["accuracy"])
print("...COMPILED")
return model
def build_model():
embed_layer = keras.layers.Embedding(
#VOCABULARY_SIZE, EMBED_DIMEN, input_length=1000, input_length=1000, mask_zero=True)
#input_dim=[None,VOCABULARY_SIZE], output_dim=EMBED_DIMEN, mask_zero=True)
input_dim=VOCABULARY_SIZE, output_dim=EMBED_DIMEN, mask_zero=True)
#rnn1_layer = keras.layers.Bidirectional(
# keras.layers.GRU(NEURONS, return_sequences=True,
# input_shape=[1000,EMBED_DIMEN], activation=ACT, dropout=DROP) )#bi
#rnn2_layer = keras.layers.Bidirectional(
# keras.layers.GRU(NEURONS, return_sequences=False,
# activation=ACT, dropout=DROP) )#bi
rnn0_layer = keras.layers.Bidirectional(
keras.layers.GRU(NEURONS, return_sequences=False,
input_shape=[1000,EMBED_DIMEN], activation=ACT, dropout=DROP) )#bi
dense1_layer = keras.layers.Dense(NEURONS, activation=ACT,dtype=dt)
drop1_layer = keras.layers.Dropout(DROP)
#dense2_layer = keras.layers.Dense(NEURONS, activation=ACT,dtype=dt)
#drop2_layer = keras.layers.Dropout(DROP)
output_layer = keras.layers.Dense(1, activation="sigmoid", dtype=dt)
mlp = keras.models.Sequential()
mlp.add(embed_layer)
#mlp.add(rnn1_layer)
#mlp.add(rnn2_layer)
mlp.add(rnn0_layer)
mlp.add(dense1_layer)
mlp.add(drop1_layer)
#mlp.add(dense2_layer)
#mlp.add(drop2_layer)
mlp.add(output_layer)
mlpc = compile_model(mlp)
return mlpc
# + [markdown] id="WV6k-xOm9Kcn"
# ## Load and partition sequences
# + id="1I-O_qzw9Kco"
# Assume file was preprocessed to contain one line per seq.
# Prefer Pandas dataframe but df does not support append.
# For conversion to tensor, must avoid python lists.
def load_fasta(filename,label):
DEFLINE='>'
labels=[]
seqs=[]
lens=[]
nums=[]
num=0
with open (filename,'r') as infile:
for line in infile:
if line[0]!=DEFLINE:
seq=line.rstrip()
num += 1 # first seqnum is 1
seqlen=len(seq)
nums.append(num)
labels.append(label)
seqs.append(seq)
lens.append(seqlen)
df1=pd.DataFrame(nums,columns=['seqnum'])
df2=pd.DataFrame(labels,columns=['class'])
df3=pd.DataFrame(seqs,columns=['sequence'])
df4=pd.DataFrame(lens,columns=['seqlen'])
df=pd.concat((df1,df2,df3,df4),axis=1)
return df
def separate_X_and_y(data):
y= data[['class']].copy()
X= data.drop(columns=['class','seqnum','seqlen'])
return (X,y)
# + [markdown] id="nRAaO9jP9Kcr"
# ## Make K-mers
# + id="e8xcZ4Mr9Kcs"
def make_kmer_table(K):
npad='N'*K
shorter_kmers=['']
for i in range(K):
longer_kmers=[]
for mer in shorter_kmers:
longer_kmers.append(mer+'A')
longer_kmers.append(mer+'C')
longer_kmers.append(mer+'G')
longer_kmers.append(mer+'T')
shorter_kmers = longer_kmers
all_kmers = shorter_kmers
kmer_dict = {}
kmer_dict[npad]=0
value=1
for mer in all_kmers:
kmer_dict[mer]=value
value += 1
return kmer_dict
KMER_TABLE=make_kmer_table(K)
def strings_to_vectors(data,uniform_len):
all_seqs=[]
for seq in data['sequence']:
i=0
seqlen=len(seq)
kmers=[]
while i < seqlen-K+1 -1: # stop at minus one for spaced seed
#kmer=seq[i:i+2]+seq[i+3:i+5] # SPACED SEED 2/1/2 for K=4
kmer=seq[i:i+K]
i += 1
value=KMER_TABLE[kmer]
kmers.append(value)
pad_val=0
while i < uniform_len:
kmers.append(pad_val)
i += 1
all_seqs.append(kmers)
pd2d=pd.DataFrame(all_seqs)
return pd2d # return 2D dataframe, uniform dimensions
# + id="sEtA0xiV9Kcv"
def make_kmers(MAXLEN,train_set):
(X_train_all,y_train_all)=separate_X_and_y(train_set)
X_train_kmers=strings_to_vectors(X_train_all,MAXLEN)
# From pandas dataframe to numpy to list to numpy
num_seqs=len(X_train_kmers)
tmp_seqs=[]
for i in range(num_seqs):
kmer_sequence=X_train_kmers.iloc[i]
tmp_seqs.append(kmer_sequence)
X_train_kmers=np.array(tmp_seqs)
tmp_seqs=None
labels=y_train_all.to_numpy()
return (X_train_kmers,labels)
# + id="jaXyySyO9Kcz"
def make_frequencies(Xin):
Xout=[]
VOCABULARY_SIZE= 4**K + 1 # plus one for 'NNN'
for seq in Xin:
freqs =[0] * VOCABULARY_SIZE
total = 0
for kmerval in seq:
freqs[kmerval] += 1
total += 1
for c in range(VOCABULARY_SIZE):
freqs[c] = freqs[c]/total
Xout.append(freqs)
Xnum = np.asarray(Xout)
return (Xnum)
def make_slice(data_set,min_len,max_len):
slice = data_set.query('seqlen <= '+str(max_len)+' & seqlen>= '+str(min_len))
return slice
# + [markdown] id="LdIS2utq9Kc9"
# ## Cross validation
# + id="BVo4tbB_9Kc-"
def do_cross_validation(X,y,given_model):
cv_scores = []
fold=0
splitter = ShuffleSplit(n_splits=SPLITS, test_size=0.1, random_state=37863)
for train_index,valid_index in splitter.split(X):
fold += 1
X_train=X[train_index] # use iloc[] for dataframe
y_train=y[train_index]
X_valid=X[valid_index]
y_valid=y[valid_index]
# Avoid continually improving the same model.
model = compile_model(keras.models.clone_model(given_model))
bestname=DATAPATH+FILENAME+".cv."+str(fold)+".best"
mycallbacks = [keras.callbacks.ModelCheckpoint(
filepath=bestname, save_best_only=True,
monitor='val_accuracy', mode='max')]
print("FIT")
start_time=time.time()
history=model.fit(X_train, y_train, # batch_size=10, default=32 works nicely
epochs=EPOCHS, verbose=1, # verbose=1 for ascii art, verbose=0 for none
callbacks=mycallbacks,
validation_data=(X_valid,y_valid) )
end_time=time.time()
elapsed_time=(end_time-start_time)
print("Fold %d, %d epochs, %d sec"%(fold,EPOCHS,elapsed_time))
pd.DataFrame(history.history).plot(figsize=(8,5))
plt.grid(True)
plt.gca().set_ylim(0,1)
plt.show()
best_model=keras.models.load_model(bestname)
scores = best_model.evaluate(X_valid, y_valid, verbose=0)
print("%s: %.2f%%" % (best_model.metrics_names[1], scores[1]*100))
cv_scores.append(scores[1] * 100)
print()
print("%d-way Cross Validation mean %.2f%% (+/- %.2f%%)" % (fold, np.mean(cv_scores), np.std(cv_scores)))
# + [markdown] id="qd3Wj_vI9KdP"
# ## Train on RNA lengths 200-1Kb
# + id="f8fNo6sn9KdH" outputId="60554121-04e6-4888-8582-3a185b255d01" colab={"base_uri": "https://localhost:8080/"}
MINLEN=200
MAXLEN=1000
print("Load data from files.")
nc_seq=load_fasta(NC_FILENAME,0)
pc_seq=load_fasta(PC_FILENAME,1)
train_set=pd.concat((nc_seq,pc_seq),axis=0)
nc_seq=None
pc_seq=None
print("Ready: train_set")
#train_set
subset=make_slice(train_set,MINLEN,MAXLEN)  # keep sequences with MINLEN <= length <= MAXLEN
print ("Data reshape")
(X_train,y_train)=make_kmers(MAXLEN,subset)
#print ("Data prep")
#X_train=make_frequencies(X_train)
# + id="G1HuSs8ZbeL4" outputId="f559c730-a957-424b-8a2d-ed5897b967d3" colab={"base_uri": "https://localhost:8080/"}
print ("Compile the model")
model=build_model()
print ("Summarize the model")
print(model.summary()) # Print this only once
model.save(DATAPATH+FILENAME+'.model')
# + id="mQ8eW5Rg9KdQ" outputId="a76f3dc9-77eb-4d8d-<PASSWORD>-1<PASSWORD>" colab={"base_uri": "https://localhost:8080/", "height": 1000}
print ("Cross valiation")
do_cross_validation(X_train,y_train,model)
print ("Done")
# + id="p4fh2GI8beMQ"
| notebooks/GRU_228.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ordered Pitch Class Interval (OPCI)
import music21
def get_opci(x, y):
"""
Get Straus' OPCI
'y - x mod12'
"""
x = music21.pitch.Pitch(x).pitchClass
y = music21.pitch.Pitch(y).pitchClass
opci = (y - x) % 12
return opci
get_opci('C4','G5')
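# Added illustration (not in the original notebook): reversing the order gives the
# complementary interval, since the ordered pitch-class interval is directed.
get_opci('G5', 'C4')  # (0 - 7) % 12 = 5, the complement of 7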
| Ordered Pitch Class Interval.ipynb |
/* --- */
/* jupyter: */
/* jupytext: */
/* text_representation: */
/* extension: .mac */
/* format_name: light */
/* format_version: '1.5' */
/* jupytext_version: 1.14.4 */
/* kernelspec: */
/* display_name: Maxima */
/* language: maxima */
/* name: maxima */
/* --- */
/* # Generating function analysis of the diffusion approximation to the birth-immigration-death process */
/* */
/* <!-- <center><font size="+4">Generating function analysis of the diffusion approximation to the birth-immigration-death process</font></center> --> */
/* + [markdown] tags=[]
/* ## Updated solution check for the diffusion approximation */
/* + [markdown] tags=[]
/* ### Check the solution to the characteristic equation. */
/* + tags=[]
chartu: logcontract(integrate(1/(-D * theta^2 + s * theta), theta));
/* + tags=[]
chartsolu: logcontract(chartu - subst(theta[0], theta, chartu));
/* + tags=[]
chartsoltheta: solve([chartsolu=u], theta);
/* -
/* `\label{eq:diffapprox-theta-char}` */
/* + tags=[]
chartheta : s * theta[0] / (D * (1 - %e^(-s*u)) * theta[0] + s * %e^(-s * u));
/* + tags=[]
solve(chartheta=theta,theta[0]);
/* + tags=[]
subst(0, u, chartheta);
/* + tags=[]
chareqn(theta) := diff(theta, u) + D * theta^2 - s * theta;
/* + tags=[]
chareqn(theta(u));
/* + tags=[]
factor(chareqn(chartheta));
/* + [markdown] tags=[]
/* ### Check the solution for the cumulant generating function. */
/* + tags=[]
cgfsol : s * %e^(s * tau) * theta/ (D* (%e^(s * tau) - 1) * theta+ s);
/* + tags=[]
gfeqn (gam) := diff(gam, tau) + theta * (D*theta - s) * diff(gam, theta);
/* + tags=[]
gfeqn(Gamma(theta,tau));
/* + tags=[]
factor(gfeqn(cgfsol));
/* + [markdown] tags=[]
/* ### Find the characteristic which passes through a given point ($\theta_f$, $\tau_f$). */
/* + tags=[]
th0sol : solve([theta[f] = subst(u[f], u, chartheta)], [theta[0]]);
/* + tags=[]
factor(psubst(th0sol, chartheta));
/* + tags=[]
factor(subst(tau[0]-tau[f], u[f], psubst(th0sol, chartheta)));
/* + tags=[]
(theta[f]*s*%e^(uf*s))/
(s*%e^(s*u)-D*theta[f]*%e^(s*u)+D*theta[f]*%e^(uf*s));
/* + [markdown] tags=[]
/* ### Integrate along that characteristic. */
/* + tags=[]
ginhint : integrate((theta[f]*s*%e^(uf*s))/
(s*%e^(s*u)-D*theta[f]*%e^(s*u)+D*theta[f]*%e^(uf*s)), u);
/* + tags=[]
ginhint : integrate((theta[f]*s*%e^(u*s))/
(s*%e^(s*uf)-D*theta[f]*%e^(s*uf)+D*theta[f]*%e^(u*s)), u);
/* + [markdown] tags=[]
/* ### Check the resulting inhomogenous solution to the c.g.f. equation. */
/* + tags=[]
logcontract(ev(factor(subst(uf, u, ginhint) - subst(0, u, ginhint)), logexpand=super));
/* + tags=[]
gaminh: M*psubst([theta[f]=theta, uf=tau], logcontract(ev(factor(subst(uf, u, ginhint) - subst(0, u, ginhint)))));
/* + tags=[]
gaminhreform: -(M/D) * log( (D/s) * (%e^(-s * tau) - 1) * theta + 1);
/* + tags=[]
logcontract(gaminh - gaminhreform);
/* + tags=[]
gaminh : -(M/D) * log( (D/s) * (%e^(s * tau) - 1) * theta + 1);
/* + tags=[]
factor(gfeqn(gaminh));
/* + tags=[]
factor(gfeqn(gaminh));
/* + tags=[]
mgfinh : ( (D/s) * (%e^(s * tau) - 1) * theta + 1)^(-M/D);
/* + tags=[]
factor(gfeqn(mgfinh) + M * theta * mgfinh);
/* + [markdown] tags=[]
/* ### Check the old solution for the probability density. */
/* + tags=[]
difeqn (rho) := D * diff(x * rho, x, 2) - s * diff(x * rho, x)
- M * diff(rho, x) - diff(rho, tau);
/* + tags=[]
difeqn(rho(x,tau));
/* -
goodrho : %e^((s*x/D) / (1 - %e^(s*tau))) *
(%e^(s*tau) - 1)^(-M/D) * x^(M/D - 1);
/* + tags=[]
oldrho : %e^(-(s * x * %e^(-s * tau) / D) / (1 - %e^(-s * tau))) *
%e^(-s * tau) * (1 - %e^(-s * tau))^(M/D - 2);
/* -
newrho : %e^((s*x/D) / (1 - %e^(-s*tau))) *
(%e^(-s*tau) - 1)^(M/D) * x^(-1 - M/D);
factor(difeqn(newrho));
/* + tags=[]
factor(difeqn(oldrho));
/* -
oldrho : %e^(-(s * x * %e^(-s * tau) / D) / (1 - %e^(-s * tau))) *
%e^(-s * tau) * (1 - %e^(-s * tau))^(M/D - 2);
factor(difeqn(oldrho));
factor(difeqn(goodrho));
/* + [markdown] tags=[]
/* ### Homogeneous inverse Laplace transform */
/* -
expand(subst(c * thp - 1/b,
theta, x * theta + a * theta / (b * theta + 1)));
expand(subst((a^(1/2) * x^(-1/2) * thp - 1)/b, theta,
x * theta + a * theta / (b * theta + 1)));
factor(psubst([a = x0 * %e^(-s*tau),
b = (D/2) * (%e^(-s*tau)- 1)],
a/b - x/b));
factor(psubst([a = x0 * %e^(-s*tau),
b = (D/2) * (%e^(-s*tau)- 1)],
(sqrt(a)*sqrt(x))/b));
/* + [markdown] tags=[]
/* ### Inhomogeneous inverse Laplace transfom */
/* Trying to solve for the probability density. */
/* + tags=[]
inhdistpart2: factor(subst(tp/x + (s/D)/(1 - %e^(-s*tau)), theta, mgfinh));
/* + tags=[]
inhdistpart1: expand(subst(tp/x + (s/D)/(1 - %e^(-s*tau)), theta, x*theta));
/* + tags=[]
inhdist: 1/x*%e^(inhdistpart1)*inhdistpart2;
/* + tags=[]
newrho : %e^((s*x/D) / (1 - %e^(-s*tau))) *
(%e^(-s*tau) - 1)^(M/D) * x^(-1 - M/D);
/* + tags=[]
neweqn (rho) := -D * diff(x * rho, x, 2) + s * diff(x * rho, x)
- M * diff(rho, x) - diff(rho, tau);
/* + tags=[]
neweqn(rho(x,tau));
/* + tags=[]
factor(neweqn(newrho));
/* + tags=[]
factor(difeqn(newrho));
/* + [markdown] tags=[]
/* ## Moment generating function */
/* + tags=[]
genfunc (x, t, n0, kb, kd) :=
((kd * %e^(- (kb - kd) * t) - kd +
(kd - kb * %e^(- (kb - kd) * t)) * x) /
(kd * %e^(- (kb - kd) * t) - kb +
(kb - kb * %e^(- (kb - kd) * t)) * x))^n0;
/* -
/* Extract the probability distribution */
/* + tags=[]
probdist (m, t, n0, kb, kd) :=
block([gf],
gf : genfunc(z, t, n0, kb, kd),
makelist([j, subst (0, z, diff(gf, z, j) / (j!))],
j, 0, m))$
/* -
/* Reparameterize it according to the diffusion approximation. */
/* + tags=[]
factor(genfunc(z / N, N * tau, N * x0, D + s/(2*N), D - s/(2*N)));
/* + tags=[]
renoprob (m, N, tau, x0, D, s) :=
map (lambda ([pa], [pa[1] / N, pa[2] * N]),
probdist (m, N * tau, N * x0, D + s/(2*N), D - s/(2*N)))$
/* -
/* Plot with values $\tau = 1.0, x_0 = 3.0, D = 1.0, s = 1.0$ */
/* + tags=[]
set_plot_option([svg_file, "maxplot.svg"])$
/* + tags=[]
plot2d([[discrete, renoprob(30, 1, 1.0, 3, 1.0, 1.0)],
[discrete, renoprob(60, 2, 1.0, 3, 1.0, 1.0)],
[discrete, renoprob(90, 3, 1.0, 3, 1.0, 1.0)],
[discrete, renoprob(120, 4, 1.0, 3, 1.0, 1.0)]],
[legend, "N=1", "N=2", "N=3", "N=4"],
[xlabel, "x"]);
/* -
/* Check that it is normalized and extract fixation probability. */
/* + tags=[]
genfunc(0, t, n0, kb, kd);
factor(genfunc(0, N * tau, N * x0, D + s/(2*N), D - s/(2*N)));
genfunc(1, t, n0, kb, kd);
/* + [markdown] tags=[] toc-hr-collapsed=true tags=[]
/* ## Functions to compute moments */
/* + tags=[]
numop(n, gf) :=
if n=0 then gf
else numop(n-1, z * diff(gf, z))$
mom(m, t, n0, kb, kd) :=
factor(subst(1, z, numop(m, genfunc(z, t, n0, kb, kd))))$
momren(m, t, n0, kb, kd) :=
factor(psubst([t = N * tau,
n0 = N * x0,
kb = D + s/(2*N),
kd = D - s/(2*N)],
mom(m, t, n0, kb, kd) / N^m))$
cumu(m, t, n0, kb, kd) :=
factor(subst(1, z, numop(m,
log(genfunc(z, t, n0, kb, kd)))))$
cumuren(m, zt, n0, kb, kd) :=
factor(psubst([t = N * tau,
n0 = N * x0,
kb = D + s/(2*N),
kd = D - s/(2*N)],
cumu(m, t, n0, kb, kd) / N^m))$
/* + [markdown] tags=[] toc-hr-collapsed=true tags=[]
/* ## Compute cumulants */
/* + tags=[]
cumu(1, t, n0, kb, kd);
cumu(2, t, n0, kb, kd);
cumu(3, t, n0, kb, kd);
cumu(4, t, n0, kb, kd);
/* -
/* Reparameterize cumulants and collect powers of $N$. */
/* + tags=[]
cumuren(1, t, n0, kb, kd);
cumuren(2, t, n0, kb, kd);
/* + tags=[]
block([mo],
mo : expand(cumuren(3, t, n0, kb, kd)),
factor(coeff (mo, N, 0)) +
factor(coeff (mo, N, -2)) / N^2);
block([mo],
mo : expand(cumuren(4, t, n0, kb, kd)),
factor(coeff (mo, N, 0)) +
factor(coeff (mo, N, -2)) / N^2);
/* + tags=[]
plot2d(
subst(1.0, s,
(%e^(s*tau)-1)*%e^(s*tau)*(%e^(s*tau)+1)/2),
[tau, 0, 1]);
| source/diffusion-approximation/genfunc-bid-diffapprox.ipynb |
# ##### Copyright 2021 Google LLC.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# # place_number_puzzle
# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/contrib/place_number_puzzle.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a>
# </td>
# <td>
# <a href="https://github.com/google/or-tools/blob/master/examples/contrib/place_number_puzzle.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a>
# </td>
# </table>
# First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab.
# !pip install ortools
# +
# Copyright 2010 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Place number puzzle Google CP Solver.
http://ai.uwaterloo.ca/~vanbeek/Courses/Slides/introduction.pdf
'''
Place numbers 1 through 8 on nodes
- each number appears exactly once
- no connected nodes have consecutive numbers
       2 - 5
     / | X | \
   1 - 3 - 6 - 8
     \ | X | /
       4 - 7
  '''
Compare with the following models:
* MiniZinc: http://www.hakank.org/minizinc/place_number.mzn
* Comet: http://www.hakank.org/comet/place_number_puzzle.co
* ECLiPSe: http://www.hakank.org/eclipse/place_number_puzzle.ecl
* SICStus Prolog: http://www.hakank.org/sicstus/place_number_puzzle.pl
* Gecode: http://www.hakank.org/gecode/place_number_puzzle.cpp
This model was created by <NAME> (<EMAIL>)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp
# Create the solver.
solver = pywrapcp.Solver("Place number")
# data
m = 32
n = 8
# Note: this is 1-based for compatibility (and laziness)
graph = [[1, 2], [1, 3], [1, 4], [2, 1], [2, 3], [2, 5], [2, 6], [3, 2],
[3, 4], [3, 6], [3, 7], [4, 1], [4, 3], [4, 6], [4, 7], [5, 2],
[5, 3], [5, 6], [5, 8], [6, 2], [6, 3], [6, 4], [6, 5], [6, 7],
[6, 8], [7, 3], [7, 4], [7, 6], [7, 8], [8, 5], [8, 6], [8, 7]]
# declare variables
x = [solver.IntVar(1, n, "x%i" % i) for i in range(n)]
#
# constraints
#
solver.Add(solver.AllDifferent(x))
for i in range(m):
# Note: make 0-based
solver.Add(abs(x[graph[i][0] - 1] - x[graph[i][1] - 1]) > 1)
# symmetry breaking
solver.Add(x[0] < x[n - 1])
#
# solution and search
#
solution = solver.Assignment()
solution.Add(x)
collector = solver.AllSolutionCollector(solution)
solver.Solve(
solver.Phase(x, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE),
[collector])
num_solutions = collector.SolutionCount()
for s in range(num_solutions):
print("x:", [collector.Value(s, x[i]) for i in range(len(x))])
print()
print("num_solutions:", num_solutions)
print("failures:", solver.Failures())
print("branches:", solver.Branches())
print("WallTime:", solver.WallTime())
print()
| examples/notebook/contrib/place_number_puzzle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import json
f = open('../amazon_yolo_data/Home_and_Kitchen_5.json', 'r')
l = f.readlines()
megaAsinDict = {}
for line in l:
asinDict = json.loads(line)
if asinDict["asin"] in megaAsinDict:
megaAsinDict[asinDict["asin"]]["review_list"].append([asinDict["helpful"][1], asinDict["helpful"][0], asinDict["reviewText"]])
else:
megaAsinDict[asinDict["asin"]] = {"review_list" : [[asinDict["helpful"][1], asinDict["helpful"][0],
asinDict["reviewText"]]]}
# -
len(megaAsinDict.keys())
# +
def getMostHelpfulReview(listOfReviews):
    # Each entry is [helpful[1], helpful[0], reviewText], as appended above; rank reviews by
    # the ratio of the second element to the first (a helpfulness score) and keep the best one.
    allReviewsDict = {}
    for review in listOfReviews:
        if float(review[0]) > 0.0:
            allReviewsDict[float(review[1])/float(review[0])] = review[2]
    if len(allReviewsDict.keys()) > 0:
        return allReviewsDict[max(allReviewsDict.keys())]
    else:
        return "NA"
asinReviewDict = {}
for asin in megaAsinDict:
bestReview = getMostHelpfulReview(megaAsinDict[asin]["review_list"])
if not bestReview == "NA":
asinReviewDict[asin] = {"Best review" : bestReview}
# -
with open('./out/asinReviewDict.json', 'w') as outfile:
json.dump(asinReviewDict, outfile)
| GetCleanJSONDump.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda env tflearn
# language: python
# name: tflearn
# ---
# # <NAME>
#
# In this notebook, I'll build a character-wise RNN trained on <NAME>, one of my all-time favorite books. It'll be able to generate new text based on the text from the book.
#
# This network is based off of <NAME>'s [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Also, some information [here at r2rt](http://r2rt.com/recurrent-neural-networks-in-tensorflow-ii.html) and from [<NAME>](https://github.com/sherjilozair/char-rnn-tensorflow) on GitHub. Below is the general architecture of the character-wise RNN.
#
# <img src="assets/charseq.jpeg" width="500">
# + deletable=true editable=true
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
# -
# First we'll load the text file and convert it into integers for our network to use. Here I'm creating a couple dictionaries to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
with open('anna.txt', 'r') as f:
text=f.read()
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
chars = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
# Let's check out the first 100 characters, make sure everything is peachy. According to the [American Book Review](http://americanbookreview.org/100bestlines.asp), this is the 6th best first line of a book ever.
text[:100]
# And we can see the characters encoded as integers.
chars[:100]
# ## Making training and validation batches
#
# Now I need to split up the data into batches, and into training and validation sets. I should be making a test set here, but I'm not going to worry about that. My test will be if the network can generate new text.
#
# Here I'll make both input and target arrays. The targets are the same as the inputs, except shifted one character over. I'll also drop the last bit of data so that I'll only have completely full batches.
#
# The idea here is to make a 2D matrix where the number of rows is equal to the batch size. Each row will be one long concatenated string from the character data. We'll split this data into a training set and validation set using the `split_frac` keyword. This will keep 90% of the batches in the training set, the other 10% in the validation set.
def split_data(chars, batch_size, num_steps, split_frac=0.9):
"""
Split character data into training and validation sets, inputs and targets for each set.
Arguments
---------
chars: character array
batch_size: Size of examples in each of batch
num_steps: Number of sequence steps to keep in the input and pass to the network
split_frac: Fraction of batches to keep in the training set
Returns train_x, train_y, val_x, val_y
"""
slice_size = batch_size * num_steps
n_batches = int(len(chars) / slice_size)
# Drop the last few characters to make only full batches
x = chars[: n_batches*slice_size]
y = chars[1: n_batches*slice_size + 1]
# Split the data into batch_size slices, then stack them into a 2D matrix
x = np.stack(np.split(x, batch_size))
y = np.stack(np.split(y, batch_size))
# Now x and y are arrays with dimensions batch_size x n_batches*num_steps
    # Split into training and validation sets, keep the first split_frac batches for training
split_idx = int(n_batches*split_frac)
train_x, train_y= x[:, :split_idx*num_steps], y[:, :split_idx*num_steps]
val_x, val_y = x[:, split_idx*num_steps:], y[:, split_idx*num_steps:]
return train_x, train_y, val_x, val_y
# Now I'll make my data sets and we can check out what's going on here. Here I'm going to use a batch size of 10 and 50 sequence steps.
train_x, train_y, val_x, val_y = split_data(chars, 10, 50)
train_x.shape
# Looking at the size of this array, we see that we have rows equal to the batch size. When we want to get a batch out of here, we can grab a subset of this array that contains all the rows but has a width equal to the number of steps in the sequence. The first batch looks like this:
train_x[:,:50]
# I'll write another function to grab batches out of the arrays made by `split_data`. Here each batch will be a sliding window on these arrays with size `batch_size X num_steps`. For example, if we want our network to train on a sequence of 100 characters, `num_steps = 100`. For the next batch, we'll shift this window the next sequence of `num_steps` characters. In this way we can feed batches to the network and the cell states will continue through on each batch.
def get_batch(arrs, num_steps):
batch_size, slice_size = arrs[0].shape
n_batches = int(slice_size/num_steps)
for b in range(n_batches):
yield [x[:, b*num_steps: (b+1)*num_steps] for x in arrs]
# ## Building the model
#
# Below is a function where I build the graph for the network.
# + deletable=true editable=true
def build_rnn(num_classes, batch_size=50, num_steps=50, lstm_size=128, num_layers=2,
learning_rate=0.001, grad_clip=5, sampling=False):
# When we're using this network for sampling later, we'll be passing in
# one character at a time, so providing an option for that
if sampling == True:
batch_size, num_steps = 1, 1
tf.reset_default_graph()
# Declare placeholders we'll feed into the graph
inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
# Keep probability placeholder for drop out layers
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# One-hot encoding the input and target characters
x_one_hot = tf.one_hot(inputs, num_classes)
y_one_hot = tf.one_hot(targets, num_classes)
### Build the RNN layers
# Use a basic LSTM cell
#lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
lstm = tf.nn.rnn_cell.BasicLSTMCell(lstm_size)
# Add dropout to the cell
#drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
drop = tf.nn.rnn_cell.DropoutWrapper(lstm, output_keep_prob=keep_prob)
# Stack up multiple LSTM layers, for deep learning
#cell = tf.contrib.rnn.MultiRNNCell([drop] * num_layers)
cell = tf.nn.rnn_cell.MultiRNNCell([drop] * num_layers)
initial_state = cell.zero_state(batch_size, tf.float32)
### Run the data through the RNN layers
    # This makes a list where each element is one step in the sequence
#rnn_inputs = [tf.squeeze(i, squeeze_dims=[1]) for i in tf.split(x_one_hot, num_steps, 1)]
rnn_inputs = [tf.squeeze(i, squeeze_dims=[1]) for i in tf.split(1, num_steps, x_one_hot)]
# Run each sequence step through the RNN and collect the outputs
#outputs, state = tf.contrib.rnn.static_rnn(cell, rnn_inputs, initial_state=initial_state)
outputs, state = tf.nn.rnn(cell, rnn_inputs, initial_state=initial_state)
final_state = state
# Reshape output so it's a bunch of rows, one output row for each step for each batch
#seq_output = tf.concat(outputs, axis=1)
seq_output = tf.concat(1, outputs)
output = tf.reshape(seq_output, [-1, lstm_size])
    # Now connect the RNN outputs to a softmax layer
with tf.variable_scope('softmax'):
softmax_w = tf.Variable(tf.truncated_normal((lstm_size, num_classes), stddev=0.1))
softmax_b = tf.Variable(tf.zeros(num_classes))
# Since output is a bunch of rows of RNN cell outputs, logits will be a bunch
# of rows of logit outputs, one for each step and batch
logits = tf.matmul(output, softmax_w) + softmax_b
# Use softmax to get the probabilities for predicted characters
preds = tf.nn.softmax(logits, name='predictions')
# Reshape the targets to match the logits
y_reshaped = tf.reshape(y_one_hot, [-1, num_classes])
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)
cost = tf.reduce_mean(loss)
# Optimizer for training, using gradient clipping to control exploding gradients
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
# Export the nodes
# NOTE: I'm using a namedtuple here because I think they are cool
export_nodes = ['inputs', 'targets', 'initial_state', 'final_state',
'keep_prob', 'cost', 'preds', 'optimizer']
Graph = namedtuple('Graph', export_nodes)
local_dict = locals()
graph = Graph(*[local_dict[each] for each in export_nodes])
return graph
# -
# ## Hyperparameters
#
# Here I'm defining the hyperparameters for the network.
#
# * `batch_size` - Number of sequences running through the network in one pass.
# * `num_steps` - Number of characters in the sequence the network is trained on. Larger is better typically, the network will learn more long range dependencies. But it takes longer to train. 100 is typically a good number here.
# * `lstm_size` - The number of units in the hidden layers.
# * `num_layers` - Number of hidden LSTM layers to use
# * `learning_rate` - Learning rate for training
# * `keep_prob` - The dropout keep probability when training. If your network is overfitting, try decreasing this.
#
# Here's some good advice from <NAME> on training the network. I'm going to write it in here for your benefit, but also link to [where it originally came from](https://github.com/karpathy/char-rnn#tips-and-tricks).
#
# > ## Tips and Tricks
#
# >### Monitoring Validation Loss vs. Training Loss
# >If you're somewhat new to Machine Learning or Neural Networks it can take a bit of expertise to get good models. The most important quantity to keep track of is the difference between your training loss (printed during training) and the validation loss (printed once in a while when the RNN is run on the validation data (by default every 1000 iterations)). In particular:
#
# > - If your training loss is much lower than validation loss then this means the network might be **overfitting**. Solutions to this are to decrease your network size, or to increase dropout. For example you could try dropout of 0.5 and so on.
# > - If your training/validation loss are about equal then your model is **underfitting**. Increase the size of your model (either number of layers or the raw number of neurons per layer)
#
# > ### Approximate number of parameters
#
# > The two most important parameters that control the model are `lstm_size` and `num_layers`. I would advise that you always use `num_layers` of either 2/3. The `lstm_size` can be adjusted based on how much data you have. The two important quantities to keep track of here are:
#
# > - The number of parameters in your model. This is printed when you start training.
# > - The size of your dataset. 1MB file is approximately 1 million characters.
#
# >These two should be about the same order of magnitude. It's a little tricky to tell. Here are some examples:
#
# > - I have a 100MB dataset and I'm using the default parameter settings (which currently print 150K parameters). My data size is significantly larger (100 mil >> 0.15 mil), so I expect to heavily underfit. I am thinking I can comfortably afford to make `lstm_size` larger.
# > - I have a 10MB dataset and running a 10 million parameter model. I'm slightly nervous and I'm carefully monitoring my validation loss. If it's larger than my training loss then I may want to try to increase dropout a bit and see if that helps the validation loss.
#
# > ### Best models strategy
#
# >The winning strategy to obtaining very good models (if you have the compute time) is to always err on making the network larger (as large as you're willing to wait for it to compute) and then try different dropout values (between 0,1). Whatever model has the best validation performance (the loss, written in the checkpoint filename, low is good) is the one you should use in the end.
#
# >It is very common in deep learning to run many different models with many different hyperparameter settings, and in the end take whatever checkpoint gave the best validation performance.
#
# >By the way, the size of your training and validation splits are also parameters. Make sure you have a decent amount of data in your validation set or otherwise the validation performance will be noisy and not very informative.
#
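# As a rough check on the parameter-count advice above, here is a minimal sketch (the helper
# `approx_param_count` is just an illustration, not part of the original post) that estimates
# the number of parameters in the network built by `build_rnn`, assuming one-hot inputs of
# size `num_classes` and the standard LSTM parameterization (four gates, each with a weight
# matrix over the concatenated input and hidden state plus a bias):
def approx_param_count(num_classes, lstm_size, num_layers):
    # First LSTM layer sees the one-hot input, deeper layers see the previous hidden state
    first_layer = 4 * ((num_classes + lstm_size) * lstm_size + lstm_size)
    deeper_layers = 4 * ((lstm_size + lstm_size) * lstm_size + lstm_size)
    # Softmax layer: lstm_size x num_classes weights plus num_classes biases
    softmax_layer = lstm_size * num_classes + num_classes
    return first_layer + (num_layers - 1) * deeper_layers + softmax_layer

# Compare against len(text), the number of characters available for training
approx_param_count(len(vocab), 512, 2), len(text)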
# + deletable=true editable=true
batch_size = 100
num_steps = 100
lstm_size = 512
num_layers = 2
learning_rate = 0.001
keep_prob = 0.5
# -
# ## Training
#
# Time for training which is pretty straightforward. Here I pass in some data, and get an LSTM state back. Then I pass that state back in to the network so the next batch can continue the state from the previous batch. And every so often (set by `save_every_n`) I calculate the validation loss and save a checkpoint.
#
# Here I'm saving checkpoints with the format
#
# `i{iteration number}_l{# hidden layer units}_v{validation loss}.ckpt`
# + deletable=true editable=true
epochs = 20
# Save every N iterations
save_every_n = 200
train_x, train_y, val_x, val_y = split_data(chars, batch_size, num_steps)
model = build_rnn(len(vocab),
batch_size=batch_size,
num_steps=num_steps,
learning_rate=learning_rate,
lstm_size=lstm_size,
num_layers=num_layers)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/______.ckpt')
n_batches = int(train_x.shape[1]/num_steps)
iterations = n_batches * epochs
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for b, (x, y) in enumerate(get_batch([train_x, train_y], num_steps), 1):
iteration = e*n_batches + b
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: keep_prob,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.cost, model.final_state, model.optimizer],
feed_dict=feed)
loss += batch_loss
end = time.time()
print('Epoch {}/{} '.format(e+1, epochs),
'Iteration {}/{}'.format(iteration, iterations),
'Training loss: {:.4f}'.format(loss/b),
'{:.4f} sec/batch'.format((end-start)))
if (iteration%save_every_n == 0) or (iteration == iterations):
# Check performance, notice dropout has been set to 1
val_loss = []
new_state = sess.run(model.initial_state)
for x, y in get_batch([val_x, val_y], num_steps):
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: 1.,
model.initial_state: new_state}
batch_loss, new_state = sess.run([model.cost, model.final_state], feed_dict=feed)
val_loss.append(batch_loss)
print('Validation loss:', np.mean(val_loss),
'Saving checkpoint!')
saver.save(sess, "checkpoints/i{}_l{}_v{:.3f}.ckpt".format(iteration, lstm_size, np.mean(val_loss)))
# -
# #### Saved checkpoints
#
# Read up on saving and loading checkpoints here: https://www.tensorflow.org/programmers_guide/variables
# + deletable=true editable=true
tf.train.get_checkpoint_state('checkpoints')
# -
# ## Sampling
#
# Now that the network is trained, we can use it to generate new text. The idea is that we pass in a character, then the network will predict the next character. We can use the new one to predict the next one. And we keep doing this to generate all new text. I also included some functionality to prime the network with some text by passing in a string and building up a state from that.
#
# The network gives us predictions for each character. To reduce noise and make things a little less random, I'm going to only choose a new character from the top N most likely characters.
#
#
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
samples = [c for c in prime]
model = build_rnn(vocab_size, lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)
# Here, pass in the path to a checkpoint and sample from the network.
checkpoint = "checkpoints/____.ckpt"
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
| intro-to-rnns/.ipynb_checkpoints/Anna KaRNNa-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.0 32-bit (''.venv'': venv)'
# language: python
# name: python3
# ---
# # --- Day 1: Sonar Sweep ---
import pandas as pd
from IPython.core.display import display, HTML
df = pd.read_csv("data/input.csv", header=None)
df = df.diff(periods=1)  # difference between consecutive depth measurements
df.head()
df = df.dropna()
df.head()
df[df < 0] = 0  # decreases don't count
df[df > 0] = 1  # each increase counts once
df = df.astype(int)
df.head()
result = df.sum().squeeze()  # number of measurements larger than the previous one
display(HTML('<h1>{}</h1>'.format(result)))
| 1/1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Part 5 - Welcome to the Sandbox
#
# In the last tutorials, we've been initializing our hook and all of our workers by hand every time. This can be a bit annoying when you're just playing around / learning about the interfaces. So, from here on out we'll be creating all these same variables using a special convenience function.
import torch
import syft as sy
sy.create_sandbox(globals())
# ### What does the Sandbox give us?
#
# As you can see above, we created several virtual workers and loaded lots of test datasets, distributing them across the workers so that we can practice with privacy-preserving techniques such as Federated Learning.
#
# We created six workers...
workers
# We also populated a ton of global variables that we can use right away!
hook
bob
# ## 1: Worker search functionality
#
# One important aspect of doing remote data science is that we want the ability to search for datasets on a remote machine. Imagine a research lab wanting to query hospitals for "radio" datasets.
torch.Tensor([1,2,3,4,5])
x = torch.tensor([1,2,3,4,5]).tag("#fun", "#boston", "#housing").describe("The input datapoints to the boston housing dataset.")
y = torch.tensor([1,2,3,4,5]).tag("#fun", "#boston", "#housing").describe("The input datapoints to the boston housing dataset.")
z = torch.tensor([1,2,3,4,5]).tag("#fun", "#mnist",).describe("The images in the MNIST training dataset.")
x
# +
x = x.send(bob)
y = y.send(bob)
z = z.send(bob)
# This searches for an exact match within a tag or description
results = bob.search(["#boston", "#housing"])
# -
results
print(results[0].description)
# ## 2: Virtual Grid
#
# A Grid is simply a collection of workers which gives you some convenience functions for when you want to put together a dataset.
grid = sy.VirtualGrid(*workers)
results, tag_ctr = grid.search("#boston")
boston_data, _ = grid.search("#boston","#data")
boston_target, _ = grid.search("#boston","#target")
| examples/tutorials/translations/chinese/Part 05 - Welcome to the Sandbox.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Quantifying the overlap between TRASE and GADM 3.6 municipalities
import pandas as pd
import geopandas as gpd
import numpy as np
import matplotlib.pyplot as plt
import shapely.wkb
from shapely.ops import cascaded_union
from shapely.geometry import Polygon, Point, MultiPolygon
from tqdm import tqdm
# ## TRASE municipalities
trase = gpd.read_file('/Users/ikersanchez/Vizzuality/PROIEKTUAK/TRASE/work/data/BRAZIL_MUNICIPALITY.topo.json')
trase.drop(columns='id', inplace=True)
trase.head()
# ## GADM 3.6 municipalities
gadm = gpd.read_file('/Users/ikersanchez/Vizzuality/PROIEKTUAK/TRASE/work/data/gadm36_BRA_shp/gadm36_BRA_2.shp')
gadm.drop(columns=['GID_0', 'NAME_0', 'GID_1', 'NAME_1', 'NL_NAME_1', 'VARNAME_2', 'NL_NAME_2',
'TYPE_2', 'ENGTYPE_2', 'CC_2', 'HASC_2'] , inplace=True)
gadm.head()
# ## Quantifying the overlap
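# The overlap fraction computed below is the intersection-over-union (IoU) of each pair of
# polygons: with T a TRASE geometry and G a GADM geometry,
#
# fraction = area(T ∩ G) / (area(T) + area(G) - area(T ∩ G)) = area(T ∩ G) / area(T ∪ G)
#
# so a fraction of 1 means the two polygons coincide exactly, and smaller values penalize
# both missing and extra area.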
def rtree_intersect(gadm, trase):
fraction = []
gid_2 = []
name_2 = []
sindex = gadm.sindex
# We iterate over municipalities in trase
for n, municipality in enumerate(trase.geometry):
# Fix invalid shapes
if municipality.is_valid == False:
municipality = municipality.simplify(0.016, preserve_topology=True)
if municipality.is_valid == False:
municipality = cascaded_union(municipality)
# gadm areas that intersect with the municipality bound
possible_matches_index = list(sindex.intersection(municipality.bounds))
possible_matches = gadm.iloc[possible_matches_index]
frac = []
for i in range(len(possible_matches_index)):
geom = possible_matches.geometry.iloc[i]
# Intersection area
int_area = geom.intersection(municipality).area
# Residual area
res_area = (geom.area - int_area) + (municipality.area - int_area)
# Total area
tot_area = int_area + res_area
frac.append(int_area/tot_area)
frac = np.array(frac)
matches = possible_matches.iloc[np.where(frac == frac.max())]
gid_2.append(matches.get('GID_2').iloc[0])
name_2.append(matches.get('NAME_2').iloc[0])
fraction.append(frac.max())
return gid_2, name_2, fraction
gid_2, name_2, fraction = rtree_intersect(gadm, trase)
trase['GID_2'] = gid_2
trase['NAME_2'] = name_2
trase['match_fraction'] = fraction
trase.head()
# **Distribution of the accuracy**
hist = plt.hist(np.array(fraction), bins= 100)
# **85 % of the municipalities overlap with an accuracy greater than 90 %**
len(trase[trase['match_fraction'] > 0.9])/len(trase)*100
# and 74 % of the municipalities have an accuracy greater than 95 %
len(trase[trase['match_fraction'] > 0.95])/len(trase)*100
# ## Some examples
#
# In the next figures we show a few examples of the overlap between municipalities with different accuracies.
#
# In red we display GADM 3.6 polygons and overlaying, in blue, the polygons from TRASE.
def display_overlap(n):
sindex = gadm.sindex
municipality = trase.geometry[n]
possible_matches_index = list(sindex.intersection(municipality.bounds))
possible_matches = gadm.iloc[possible_matches_index]
fraction = []
for i in range(len(possible_matches_index)):
geom = possible_matches.geometry.iloc[i]
# Intersection area
int_area = geom.intersection(municipality).area
# Residual area
res_area = (geom.area - int_area) + (municipality.area - int_area)
# Total area
tot_area = int_area + res_area
fraction.append(int_area/tot_area)
fraction = np.array(fraction)
possible_matches.iloc[np.where(fraction == fraction.max())]
fig, ax = plt.subplots(figsize=[10,10])
ax.set_aspect('equal')
#possible_matches.iloc[:].plot(ax=ax, color='r', edgecolor='k', alpha=0.5)
possible_matches.iloc[np.where(fraction == fraction.max())].plot(ax=ax, edgecolor='k', alpha=0.75, color="#ffe1e6")
trase.iloc[n:(n+1)].plot(ax=ax, edgecolor='k', alpha=0.75, color="#c3e1ff")
# **Municipality with 30 % of accuracy**
#
# With this accuracy we have many examples such as the one shown below.
# In GADM 3.6 we find two separate polygons with the same ID (BRA.21.264_1 in this case), while in TRASE these polygons belong to two different
# municipalities.
display_overlap(4003)
# Two different polygons in TRASE
trase[trase['GID_2'] == 'BRA.21.264_1']
trase[trase['GID_2'] == 'BRA.21.264_1'].plot(figsize=(10,10), edgecolor='k', color="#c3e1ff")
# **Municipality with 40 % of accuracy**
#
# With this accuracy we have many examples such as the one shown below. In GADM 3.6 we find a single polygon, while in TRASE this polygon has been split into two different municipalities.
trase['match_fraction'].iloc[1505]
display_overlap(1505)
# Two different polygons in TRASE
trase[trase['GID_2'] == 'BRA.5.113_1']
trase[trase['GID_2'] == 'BRA.5.113_1'].plot(figsize=(10,10), edgecolor='k', color="#c3e1ff")
# **Municipality with 80 % of accuracy**
#
# Above this accuracy we find a one-to-one relation between GADM and Trase.
trase['match_fraction'].iloc[0]
display_overlap(0)
# **Municipality with 90 % of accuracy**
trase['match_fraction'].iloc[23]
display_overlap(23)
# **Municipality with 99 % of accuracy**
trase['match_fraction'].iloc[2]
display_overlap(2)
# From this analysis I would recommend using only the municipalities with an accuracy greater than 90 %; 85 % of the municipalities fulfil this criterion.
| work/TRASE_overlap_GADM36.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] colab_type="text" id="5hIbr52I7Z7U"
# Deep Learning
# =============
#
# Assignment 1
# ------------
#
# The objective of this assignment is to learn about simple data curation practices, and familiarize you with some of the data we'll be reusing later.
#
# This notebook uses the [notMNIST](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html) dataset to be used with python experiments. This dataset is designed to look like the classic [MNIST](http://yann.lecun.com/exdb/mnist/) dataset, while looking a little more like real data: it's a harder task, and the data is a lot less 'clean' than MNIST.
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="apJbCsBHl-2A"
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
# Config the matlotlib backend as plotting inline in IPython
# %matplotlib inline
# + [markdown] colab_type="text" id="jNWGtZaXn-5j"
# First, we'll download the dataset to our local machine. The data consists of characters rendered in a variety of fonts on a 28x28 image. The labels are limited to 'A' through 'J' (10 classes). The training set has about 500k and the test set 19000 labelled examples. Given these sizes, it should be possible to train models quickly on any machine.
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}]} colab_type="code" executionInfo={"elapsed": 186058, "status": "ok", "timestamp": 1444485672507, "user": {"color": "#1FA15D", "displayName": "<NAME>", "isAnonymous": false, "isMe": true, "permissionId": "05076109866853157986", "photoUrl": "//lh6.googleusercontent.com/-cCJa7dTDcgQ/AAAAAAAAAAI/AAAAAAAACgw/r2EZ_8oYer4/s50-c-k-no/photo.jpg", "sessionId": "2a0a5e044bb03b66", "userId": "102167687554210253930"}, "user_tz": 420} id="EYRJ4ICW6-da" outputId="0d0f85df-155f-4a89-8e7e-ee32df36ec8d"
url = 'http://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None
def download_progress_hook(count, blockSize, totalSize):
"""A hook to report the progress of a download. This is mostly intended for users with
slow internet connections. Reports every 1% change in download progress.
"""
global last_percent_reported
percent = int(count * blockSize * 100 / totalSize)
if last_percent_reported != percent:
if percent % 5 == 0:
sys.stdout.write("%s%%" % percent)
sys.stdout.flush()
else:
sys.stdout.write(".")
sys.stdout.flush()
last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
"""Download a file if not present, and make sure it's the right size."""
if force or not os.path.exists(filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, filename, reporthook=download_progress_hook)
print('\nDownload Complete!')
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
# + [markdown] colab_type="text" id="cC3p0oEyF8QT"
# Extract the dataset from the compressed .tar.gz file.
# This should give you a set of directories, labelled A through J.
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}]} colab_type="code" executionInfo={"elapsed": 186055, "status": "ok", "timestamp": 1444485672525, "user": {"color": "#1FA15D", "displayName": "<NAME>", "isAnonymous": false, "isMe": true, "permissionId": "05076109866853157986", "photoUrl": "//lh6.googleusercontent.com/-cCJa7dTDcgQ/AAAAAAAAAAI/AAAAAAAACgw/r2EZ_8oYer4/s50-c-k-no/photo.jpg", "sessionId": "2a0a5e044bb03b66", "userId": "102167687554210253930"}, "user_tz": 420} id="H8CBE-WZ8nmj" outputId="ef6c790c-2513-4b09-962e-27c79390c762"
num_classes = 10
np.random.seed(133)
def maybe_extract(filename, force=False):
root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz
if os.path.isdir(root) and not force:
# You may override by setting force=True.
print('%s already present - Skipping extraction of %s.' % (root, filename))
else:
print('Extracting data for %s. This may take a while. Please wait.' % root)
tar = tarfile.open(filename)
sys.stdout.flush()
tar.extractall()
tar.close()
data_folders = [
os.path.join(root, d) for d in sorted(os.listdir(root))
if os.path.isdir(os.path.join(root, d))]
if len(data_folders) != num_classes:
raise Exception(
'Expected %d folders, one per class. Found %d instead.' % (
num_classes, len(data_folders)))
print(data_folders)
return data_folders
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
# + [markdown] colab_type="text" id="4riXK3IoHgx6"
# ---
# Problem 1
# ---------
#
# Let's take a peek at some of the data to make sure it looks sensible. Each exemplar should be an image of a character A through J rendered in a different font. Display a sample of the images that we just downloaded. Hint: you can use the package IPython.display.
#
# ---
# +
image_num = 133 # Select a number globally
def display_sample_images(folder, image_num):
"""Display a sample image from each character A through J"""
image_files = os.listdir(folder)
image = image_files[image_num]
image_file = os.path.join(folder, image)
print('A sample of image from',folder,'\r')
display(Image(image_file))
for folder in train_folders:
display_sample_images(folder, image_num)
# + [markdown] colab_type="text" id="PBdkjESPK8tw"
# Now let's load the data in a more manageable format. Since, depending on your computer setup you might not be able to fit it all in memory, we'll load each class into a separate dataset, store them on disk and curate them independently. Later we'll merge them into a single dataset of manageable size.
#
# We'll convert the entire dataset into a 3D array (image index, x, y) of floating point values, normalized to have approximately zero mean and standard deviation ~0.5 to make training easier down the road.
#
# A few images might not be readable, we'll just skip them.
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 30}]} colab_type="code" executionInfo={"elapsed": 399874, "status": "ok", "timestamp": 1444485886378, "user": {"color": "#1FA15D", "displayName": "<NAME>", "isAnonymous": false, "isMe": true, "permissionId": "05076109866853157986", "photoUrl": "//lh6.googleusercontent.com/-cCJa7dTDcgQ/AAAAAAAAAAI/AAAAAAAACgw/r2EZ_8oYer4/s50-c-k-no/photo.jpg", "sessionId": "2a0a5e044bb03b66", "userId": "102167687554210253930"}, "user_tz": 420} id="h7q0XhG3MJdf" outputId="92c391bb-86ff-431d-9ada-315568a19e59"
image_size = 28 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
def load_letter(folder, min_num_images):
"""Load the data for a single letter label."""
image_files = os.listdir(folder)
dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
dtype=np.float32)
print(folder)
num_images = 0
for image in image_files:
image_file = os.path.join(folder, image)
try:
image_data = (ndimage.imread(image_file).astype(float) -
pixel_depth / 2) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset[num_images, :, :] = image_data
num_images = num_images + 1
except IOError as e:
print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
dataset = dataset[0:num_images, :, :]
if num_images < min_num_images:
raise Exception('Many fewer images than expected: %d < %d' %
(num_images, min_num_images))
print('Full dataset tensor:', dataset.shape)
print('Mean:', np.mean(dataset))
print('Standard deviation:', np.std(dataset))
return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
dataset_names = []
for folder in data_folders:
set_filename = folder + '.pickle'
dataset_names.append(set_filename)
if os.path.exists(set_filename) and not force:
# You may override by setting force=True.
print('%s already present - Skipping pickling.' % set_filename)
else:
print('Pickling %s.' % set_filename)
dataset = load_letter(folder, min_num_images_per_class)
try:
with open(set_filename, 'wb') as f:
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', set_filename, ':', e)
return dataset_names
train_datasets = maybe_pickle(train_folders, 45000)
test_datasets = maybe_pickle(test_folders, 1800)
# + [markdown] colab_type="text" id="vUdbskYE2d87"
# ---
# Problem 2
# ---------
#
# Let's verify that the data still looks good. Displaying a sample of the labels and images from the ndarray. Hint: you can use matplotlib.pyplot.
#
# ---
# +
def read_from_pickle(filename):
with open(filename, 'rb') as f:
dataset = pickle.load(f)
return dataset
plt.rcParams['figure.figsize'] = (15.0, 15.0)
f, ax = plt.subplots(nrows=1, ncols=10)
for i, filename in enumerate(train_datasets):
image_slice = read_from_pickle(filename)[image_num, :, :]
ax[i].axis('off')
ax[i].set_title(filename[15], loc='center')
ax[i].imshow(image_slice)
del image_slice
# + [markdown] colab_type="text" id="cYznx5jUwzoO"
# ---
# Problem 3
# ---------
# Another check: we expect the data to be balanced across classes. Verify that.
#
# ---
# +
for i, filename in enumerate(train_datasets):
image_dataset = read_from_pickle(filename)
image_shape = image_dataset.shape[0]
print('Dataset of', filename[15], 'contains', image_shape, 'images.')
del image_dataset, image_shape
# + [markdown] colab_type="text" id="LA7M7K22ynCt"
# Merge and prune the training data as needed. Depending on your computer setup, you might not be able to fit it all in memory, and you can tune `train_size` as needed. The labels will be stored into a separate array of integers 0 through 9.
#
# Also create a validation dataset for hyperparameter tuning.
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}]} colab_type="code" executionInfo={"elapsed": 411281, "status": "ok", "timestamp": 1444485897869, "user": {"color": "#1FA15D", "displayName": "<NAME>", "isAnonymous": false, "isMe": true, "permissionId": "05076109866853157986", "photoUrl": "//lh6.googleusercontent.com/-cCJa7dTDcgQ/AAAAAAAAAAI/AAAAAAAACgw/r2EZ_8oYer4/s50-c-k-no/photo.jpg", "sessionId": "2a0a5e044bb03b66", "userId": "102167687554210253930"}, "user_tz": 420} id="s3mWgZLpyuzq" outputId="8af66da6-902d-4719-bedc-7c9fb7ae7948"
def make_arrays(nb_rows, img_size):
if nb_rows:
dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
labels = np.ndarray(nb_rows, dtype=np.int32)
else:
dataset, labels = None, None
return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
num_classes = len(pickle_files)
valid_dataset, valid_labels = make_arrays(valid_size, image_size)
train_dataset, train_labels = make_arrays(train_size, image_size)
vsize_per_class = valid_size // num_classes
tsize_per_class = train_size // num_classes
start_v, start_t = 0, 0
end_v, end_t = vsize_per_class, tsize_per_class
end_l = vsize_per_class+tsize_per_class
for label, pickle_file in enumerate(pickle_files):
try:
with open(pickle_file, 'rb') as f:
letter_set = pickle.load(f)
# let's shuffle the letters to have random validation and training set
np.random.shuffle(letter_set)
if valid_dataset is not None:
valid_letter = letter_set[:vsize_per_class, :, :]
valid_dataset[start_v:end_v, :, :] = valid_letter
valid_labels[start_v:end_v] = label
start_v += vsize_per_class
end_v += vsize_per_class
train_letter = letter_set[vsize_per_class:end_l, :, :]
train_dataset[start_t:end_t, :, :] = train_letter
train_labels[start_t:end_t] = label
start_t += tsize_per_class
end_t += tsize_per_class
except Exception as e:
print('Unable to process data from', pickle_file, ':', e)
raise
return valid_dataset, valid_labels, train_dataset, train_labels
train_size = 20000
valid_size = 10000
test_size = 10000
valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
train_datasets, train_size, valid_size)
_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
# + [markdown] colab_type="text" id="GPTCnjIcyuKN"
# Next, we'll randomize the data. It's important to have the labels well shuffled for the training and test distributions to match.
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="6WZ2l2tN2zOL"
def randomize(dataset, labels):
permutation = np.random.permutation(labels.shape[0])
shuffled_dataset = dataset[permutation,:,:]
shuffled_labels = labels[permutation]
return shuffled_dataset, shuffled_labels
train_dataset, train_labels = randomize(train_dataset, train_labels)
test_dataset, test_labels = randomize(test_dataset, test_labels)
valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)
# + [markdown] colab_type="text" id="puDUTe6t6USl"
# ---
# Problem 4
# ---------
# Convince yourself that the data is still good after shuffling!
#
# ---
# +
f, ax = plt.subplots(nrows=1, ncols=10)
for i, j in enumerate(np.random.randint(0, train_size, 10)):
image_slice = train_dataset[j, :, :]
image_label = train_labels[j]
ax[i].axis('off')
title = '#' + str(j) + ': ' + chr(image_label+65)
ax[i].set_title(title, loc='center')
ax[i].imshow(image_slice)
del image_slice
# + [markdown] colab_type="text" id="tIQJaJuwg5Hw"
# Finally, let's save the data for later reuse:
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="QiR_rETzem6C"
pickle_file = 'notMNIST.pickle'
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}]} colab_type="code" executionInfo={"elapsed": 413065, "status": "ok", "timestamp": 1444485899688, "user": {"color": "#1FA15D", "displayName": "<NAME>", "isAnonymous": false, "isMe": true, "permissionId": "05076109866853157986", "photoUrl": "//lh6.googleusercontent.com/-cCJa7dTDcgQ/AAAAAAAAAAI/AAAAAAAACgw/r2EZ_8oYer4/s50-c-k-no/photo.jpg", "sessionId": "2a0a5e044bb03b66", "userId": "102167687554210253930"}, "user_tz": 420} id="hQbLjrW_iT39" outputId="b440efc6-5ee1-4cbc-d02d-93db44ebd956"
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
# + [markdown] colab_type="text" id="gE_cRAQB33lk"
# ---
# Problem 5
# ---------
#
# By construction, this dataset might contain a lot of overlapping samples, including training data that's also contained in the validation and test set! Overlap between training and test can skew the results if you expect to use your model in an environment where there is never an overlap, but are actually ok if you expect to see training samples recur when you use it.
# Measure how much overlap there is between training, validation and test samples.
#
# Optional questions:
# - What about near duplicates between datasets? (images that are almost identical)
# - Create a sanitized validation and test set, and compare your accuracy on those in subsequent assignments.
# ---
# +
train_r = train_dataset.reshape(train_dataset.shape[0],-1)
train_idx = np.lexsort(train_r.T)
train_dataset_sanitized = train_dataset[train_idx][np.append(True,(np.diff(train_r[train_idx],axis=0)!=0).any(1))]
train_labels_sanitized = train_labels[train_idx][np.append(True,(np.diff(train_r[train_idx],axis=0)!=0).any(1))]
valid_r = valid_dataset.reshape(valid_dataset.shape[0],-1)
valid_idx = np.lexsort(valid_r.T)
valid_dataset_sanitized = valid_dataset[valid_idx][np.append(True,(np.diff(valid_r[valid_idx],axis=0)!=0).any(1))]
valid_labels_sanitized = valid_labels[valid_idx][np.append(True,(np.diff(valid_r[valid_idx],axis=0)!=0).any(1))]
test_r = test_dataset.reshape(test_dataset.shape[0],-1)
test_idx = np.lexsort(test_r.T)
test_dataset_sanitized = test_dataset[test_idx][np.append(True,(np.diff(test_r[test_idx],axis=0)!=0).any(1))]
test_labels_sanitized = test_labels[test_idx][np.append(True,(np.diff(test_r[test_idx],axis=0)!=0).any(1))]
del train_r, valid_r, test_r
print('Training dataset has', train_dataset_sanitized.shape[0],'unique images.')
print('Sanitized training dataset has', train_dataset_sanitized.shape[0],'images.\n')
print('Validation dataset has', valid_dataset_sanitized.shape[0],'unique images.')
print('Test dataset has', test_dataset_sanitized.shape[0],'unique images.\n')
train_r = train_dataset_sanitized.reshape(train_dataset_sanitized.shape[0],-1)
valid_r = valid_dataset_sanitized.reshape(valid_dataset_sanitized.shape[0],-1)
test_r = test_dataset_sanitized.reshape(test_dataset_sanitized.shape[0],-1)
valid_dup = []
test_dup = []
train_r = {tuple(row):i for i,row in enumerate(train_r)}
for i,row in enumerate(valid_r):
if tuple(row) in train_r:
valid_dup.append(i)
for i,row in enumerate(test_r):
if tuple(row) in train_r:
test_dup.append(i)
print('Validation dataset has', len(valid_dup), 'duplicate images to training dataset.')
print('Test dataset has', len(test_dup), 'duplicate images to training dataset.\n')
valid_dataset_sanitized = np.delete(valid_dataset_sanitized, np.asarray(valid_dup), 0)
valid_labels_sanitized = np.delete(valid_labels_sanitized, np.asarray(valid_dup), 0)
test_dataset_sanitized = np.delete(test_dataset_sanitized, np.asarray(test_dup), 0)
test_labels_sanitized = np.delete(test_labels_sanitized, np.asarray(test_dup), 0)
print('Sanitized validation dataset has', valid_dataset_sanitized.shape[0],'images.')
print('Sanitized test dataset has', test_dataset_sanitized.shape[0],'images.')
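# As a rough pass at the optional "near duplicates" question above, one cheap heuristic (a
# sketch only, with an arbitrary quantization step of 0.1) is to quantize pixel values before
# hashing, so that images differing only by tiny pixel noise collapse to the same key.
def quantized_key(img, step=0.1):
    # Round the image onto a coarse grid and use the resulting bytes as a hashable key
    return np.round(img / step).astype(np.int8).tobytes()

train_keys = set(quantized_key(img) for img in train_dataset_sanitized)
near_dup_valid = sum(quantized_key(img) in train_keys for img in valid_dataset_sanitized)
near_dup_test = sum(quantized_key(img) in train_keys for img in test_dataset_sanitized)
print('Near-duplicate candidates vs. training set - validation:', near_dup_valid, 'test:', near_dup_test)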
# +
pickle_file = 'notMNIST_sanitized.pickle'
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': train_dataset_sanitized,
'train_labels': train_labels_sanitized,
'valid_dataset': valid_dataset_sanitized,
'valid_labels': valid_labels_sanitized,
'test_dataset': test_dataset_sanitized,
'test_labels': test_labels_sanitized,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
# + [markdown] colab_type="text" id="L8oww1s4JMQx"
# ---
# Problem 6
# ---------
#
# Let's get an idea of what an off-the-shelf classifier can give you on this data. It's always good to check that there is something to learn, and that it's a problem that is not so trivial that a canned solution solves it.
#
# Train a simple model on this data using 50, 100, 1000 and 5000 training samples. Hint: you can use the LogisticRegression model from sklearn.linear_model.
#
# Optional question: train an off-the-shelf model on all the data!
#
# ---
# +
from sklearn.metrics import classification_report, confusion_matrix
def train_predict(clf, n_data, train_data, train_label, test_data, test_label):
    # Fit on the first n_data samples of the given training set (flattened to 2D for sklearn)
    clf.fit(train_data[:n_data,:,:].reshape(n_data,-1), train_label[:n_data])
    # Predict
    expected = test_label
    predicted = clf.predict(test_data.reshape(test_data.shape[0],-1))
    # Print Results
    print('Classification Report of',n_data,'training samples:\n', classification_report(expected, predicted))
    print('Confusion Matrix of',n_data,'training samples:\n', confusion_matrix(expected, predicted))
# Create a Logistic Regression Classifier
clf = LogisticRegression(penalty='l2', tol=0.0001, C=1.0, random_state=133, solver='sag', max_iter=100, multi_class='ovr', verbose=0, n_jobs=4)
train_predict(clf, 50, train_dataset, train_labels, test_dataset, test_labels)
train_predict(clf, 100, train_dataset, train_labels, test_dataset, test_labels)
train_predict(clf, 1000, train_dataset, train_labels, test_dataset, test_labels)
train_predict(clf, 5000, train_dataset, train_labels, test_dataset, test_labels)
train_predict(clf, 20000, train_dataset, train_labels, test_dataset, test_labels)
# +
# Train and predict sanitized datasets
train_predict(clf, train_dataset_sanitized.shape[0], train_dataset_sanitized, train_labels_sanitized, test_dataset_sanitized, test_labels_sanitized)
| 1_notmnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1>Write and Save Files in Python</h1>
# <p><strong>Welcome!</strong> This notebook will teach you how to write text to a file in the Python programming language. By the end of this lab, you'll know how to write to a file and how to copy one file to another.</p>
# <h2>Table of Contents</h2>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <ul>
# <li><a href="write">Writing Files</a></li>
# <li><a href="copy">Copy a File</a></li>
# </ul>
# <p>
# Estimated time needed: <strong>15 min</strong>
# </p>
# </div>
#
# <hr>
# <h2 id="write">Writing Files</h2>
# We can open a file object and use its <code>write()</code> method to write text to the file. To write to a file, the mode argument must be set to <b>w</b>. Let’s write a file <b>Example2.txt</b> with the line: <b>“This is line A”</b>
# +
# Write line to file
with open('/resources/data/Example2.txt', 'w') as writefile:
writefile.write("This is line A")
# -
# We can read the file to see if it worked:
# +
# Read file
with open('/resources/data/Example2.txt', 'r') as testwritefile:
print(testwritefile.read())
# -
# We can write multiple lines:
# +
# Write lines to file
with open('/resources/data/Example2.txt', 'w') as writefile:
writefile.write("This is line A\n")
writefile.write("This is line B\n")
# -
# The method <code>.write()</code> works similarly to the method <code>.readline()</code>, except that instead of reading a new line it writes a new line. The process is illustrated in the figure below; the different colour coding of the grid represents a new line added to the file after each method call.
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Images/WriteLine.png" width="500" />
# You can check the file to see if your results are correct
# +
# Check whether write to file
with open('/resources/data/Example2.txt', 'r') as testwritefile:
print(testwritefile.read())
# -
# By setting the mode argument to append **a** you can append a new line as follows:
# +
# Write a new line to text file
with open('/resources/data/Example2.txt', 'a') as testwritefile:
testwritefile.write("This is line C\n")
# -
# You can verify the file has changed by running the following cell:
# +
# Verify if the new line is in the text file
with open('/resources/data/Example2.txt', 'r') as testwritefile:
print(testwritefile.read())
# -
# We write a list to a <b>.txt</b> file as follows:
# +
# Sample list of text
Lines = ["This is line A\n", "This is line B\n", "This is line C\n"]
Lines
# +
# Write the strings in the list to text file
with open('Example2.txt', 'w') as writefile:
for line in Lines:
print(line)
writefile.write(line)
# -
# We can verify the file is written by reading it and printing out the values:
# +
# Verify if writing to file is successfully executed
with open('Example2.txt', 'r') as testwritefile:
print(testwritefile.read())
# -
# We can append to the file again by changing the second parameter to <b>a</b>. This adds the line:
# +
# Append the line to the file
with open('Example2.txt', 'a') as testwritefile:
testwritefile.write("This is line D\n")
# -
# We can see the results of appending the file:
# +
# Verify if the appending is successfully executed
with open('Example2.txt', 'r') as testwritefile:
print(testwritefile.read())
# -
# <hr>
# <h2 id="copy">Copy a File</h2>
# Let's copy the file <b>Example2.txt</b> to the file <b>Example3.txt</b>:
# +
# Copy file to another
with open('Example2.txt','r') as readfile:
with open('Example3.txt','w') as writefile:
for line in readfile:
writefile.write(line)
# -
# We can read the file to see if everything works:
# +
# Verify if the copy is successfully executed
with open('Example3.txt','r') as testwritefile:
print(testwritefile.read())
# -
# Besides reading files, we can also write data to files and save them in different file formats such as **.txt**, **.csv**, and **.xls** (Excel) files. Let's take a look at an example below.
# Now go to the directory to ensure the <b>.txt</b> file exists and contains the data that we wrote.
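# As a quick illustrative sketch (not part of the original lab), the same <code>open()</code> and <code>write()</code> pattern can be used for a <b>.csv</b> file; here the built-in <code>csv</code> module writes a small table to a hypothetical file <b>Example4.csv</b>:
# +
# Illustrative sketch: write a small table to a CSV file using the csv module
import csv
rows = [["line", "text"], [1, "This is line A"], [2, "This is line B"]]
with open('Example4.csv', 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(rows)
# Read the CSV file back to verify its contents
with open('Example4.csv', 'r') as readcsv:
    print(readcsv.read())
# -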
# <hr>
# <h2>The last exercise!</h2>
# <p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href="https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/" target="_blank">this article</a> to learn how to share your work.
# <hr>
# <h3>About the Authors:</h3>
# <p><a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank"><NAME></a> is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.</p>
# Other contributors: <a href="www.linkedin.com/in/jiahui-mavis-zhou-a4537814a"><NAME></a>
# <hr>
# <p>Copyright © 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href="https://cognitiveclass.ai/mit-license/">MIT License</a>.</p>
| 2 - Python For Data Science/4.2 WriteFile.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Task 1
#
# Test the hypothesis that the delay follows a Normal distribution and that the **mean** of the delay is 0. Be careful about the outliers.
# ***Strategy***
# - Look into the arr_delay and determine the distribution
# - Try using descriptive statistics and LOF as a means to treat the outliers
# ---
# **INPUT**: data with nulls removed
# ---
#import packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv("flight_sample_small.csv")
df = pd.read_csv("flight_sample_large.csv")
# Looking at the arrival delay descriptive statistics
df.arr_delay.describe()
# Let's look at a graph
sns.set(rc={'figure.figsize':(11.7,8.27)})
sns.distplot(df.arr_delay).set_title("Delays (no outlier treatment)")
# From the graph we can see there is a large group of outliers. We will consider 2 options for outlier treatment:
# 1. Descriptive Statistics
# 2. LOF
# *Beginning with Descriptive Statistics:*
# - these state that anything outside of 1.5×IQR is considered an outlier
# - the issue is that the outliers themselves are influencing the descriptive statistics
#find the IQR of our data (describe() index 6 is the 75th percentile, index 4 is the 25th percentile)
IQR = df.arr_delay.describe()[6] - df.arr_delay.describe()[4]
# the bounds below are centred on the mean (describe() index 1);
# note that the classical Tukey rule would instead use Q1 - 1.5*IQR and Q3 + 1.5*IQR
min_delay = df.arr_delay.describe()[1]-1.5*IQR
max_delay = df.arr_delay.describe()[1]+1.5*IQR
print(min_delay, max_delay)
# we will filter our data to only include the delays within this range
df_iqr = df[df.arr_delay <= max_delay]
df_iqr = df_iqr[df_iqr.arr_delay >= min_delay]
df_iqr.arr_delay.describe()
#determine how much of the data was treated as outliers
print("The percentage of observations treated as outliers is: ",(df.shape[0]-df_iqr.shape[0])/df.shape[0]*100)
#view as a plot
sns.distplot(df_iqr.arr_delay, bins=60).set_title("Delays (IQR Treatment)")
df_iqr.to_csv("iqr.csv", index=False)
df_iqr.arr_delay.to_csv("iqr_taskone.csv", index=False)
# *Local Outlier Factor*
# - use unsupervised learning to determine the outliers
from sklearn.neighbors import LocalOutlierFactor
X = df.arr_delay.values.reshape(-1,1)
clf = LocalOutlierFactor(n_neighbors=35)
clf.fit(X)
X_scores = clf.negative_outlier_factor_
def check_lof_drop (col1, col2):
"""Used to determine if the negative outlier factor is within the threshold"""
threshold = -1.25
if col2 < threshold:
return 1
return 0
# +
#determine the boundaries on either side
test = pd.DataFrame(X, X_scores)
test.reset_index(level=0, inplace=True)
test = test.rename(columns = {'index' : 'negative_outlier_factor', 0 : 'arr_delay'})
test["drop_lof"] = test.apply(lambda x: check_lof_drop(x.arr_delay, x.negative_outlier_factor), axis=1)
# -
# filter using the hard-coded boundaries obtained from the LOF analysis above
df_LOF = df[df.arr_delay <= 266]
df_LOF = df_LOF[df_LOF.arr_delay >= -58]
df_LOF.arr_delay.describe()
#determine how much of the data was treated as outliers
print("The percentage of observations treated as outliers is: ",(df.shape[0]-df_LOF.shape[0])/df.shape[0]*100)
sns.distplot(df_LOF.arr_delay).set_title("Delays (LOF)")
df_LOF.to_csv("lof.csv", index=False)
df_LOF.arr_delay.to_csv("lof_taskone.csv", index=False)
| Notebooks/2_Task_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from tensorflow.keras import callbacks
from IPython import display
from matplotlib import pyplot as plt
import numpy as np
import pathlib
import shutil
import tempfile
# -
logdir = pathlib.Path(tempfile.mkdtemp())/"tensorboard_logs"
shutil.rmtree(logdir, ignore_errors=True)
gz = tf.keras.utils.get_file('HIGGS.csv.gz', 'http://mlphysics.ics.uci.edu/data/higgs/HIGGS.csv.gz')
FEATURES = 28
ds = tf.data.experimental.CsvDataset(gz,[float(),]*(FEATURES+1), compression_type="GZIP")
def pack_row(*row):
label = row[0]
features = tf.stack(row[1:],1)
return features, label
packed_ds = ds.batch(10000).map(pack_row).unbatch()
for features,label in packed_ds.batch(1000).take(1):
print(features[0])
plt.hist(features.numpy().flatten(), bins = 101)
# +
N_VALIDATION = int(1e3)
N_TRAIN = int(1e4)
BUFFER_SIZE = int(1e4)
BATCH_SIZE = 500
STEPS_PER_EPOCH = N_TRAIN//BATCH_SIZE
validate_ds = packed_ds.take(N_VALIDATION).cache()
train_ds = packed_ds.skip(N_VALIDATION).take(N_TRAIN).cache()
# -
train_ds
validate_ds = validate_ds.batch(BATCH_SIZE)
train_ds = train_ds.shuffle(BUFFER_SIZE).repeat().batch(BATCH_SIZE)
# +
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
0.001,
decay_steps=STEPS_PER_EPOCH*1000,
decay_rate=1,
staircase=False)
def get_optimizer():
return tf.keras.optimizers.Adam(lr_schedule)
# -
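# As documented for Keras's `InverseTimeDecay`, the schedule above computes roughly
# `lr = 0.001 / (1 + decay_rate * step / decay_steps)`; with `decay_rate=1` and
# `decay_steps = STEPS_PER_EPOCH*1000`, the learning rate is halved after about 1,000 epochs
# and drops to a third after about 2,000 epochs, which matches the plot produced below.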
step = np.linspace(0,100000)
lr = lr_schedule(step)
plt.figure(figsize = (8,6), dpi=150)
plt.plot(step/STEPS_PER_EPOCH, lr)
plt.ylim([0,max(plt.ylim())])
plt.xlabel('Epoch')
_ = plt.ylabel('Learning Rate')
# Because each training step is quick but the run spans many epochs, printing full output for every epoch is unnecessary. The callback below prints a dot for every epoch and a full progress report every 100 epochs.
class EpochDots(tf.keras.callbacks.Callback):
"""
A simple callback that prints a "." every epoch, with occasional reports.
Args:
report_every: How many epochs between full reports
dot_every: How many epochs between dots.
"""
def __init__(self, report_every=100, dot_every=1):
self.report_every = report_every
self.dot_every = dot_every
def on_epoch_end(self, epoch, logs):
if epoch % self.report_every == 0:
print()
print('Epoch: {:d}, '.format(epoch), end='')
for name, value in sorted(logs.items()):
print('{}:{:0.4f}'.format(name, value), end=', ')
print()
if epoch % self.dot_every == 0:
print('.', end='')
def get_callbacks(name):
return [
EpochDots(),
tf.keras.callbacks.EarlyStopping(monitor='val_binary_crossentropy', patience=200),
tf.keras.callbacks.TensorBoard(logdir/name),
]
def compile_and_fit(model, name, optimizer=None, max_epochs=1000):
if optimizer is None:
optimizer = get_optimizer()
model.compile(
optimizer=optimizer,
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[tf.keras.losses.BinaryCrossentropy(from_logits=True, name='binary_crossentropy'), 'accuracy'])
model.summary()
history = model.fit(
train_ds,
steps_per_epoch = STEPS_PER_EPOCH,
epochs=max_epochs,
validation_data=validate_ds,
callbacks=get_callbacks(name),
verbose=0)
return history
# +
tiny_model = tf.keras.Sequential([
layers.Dense(16, activation='elu', input_shape=(FEATURES,)),
layers.Dense(1)
])
size_histories = {}
size_histories['Tiny'] = compile_and_fit(tiny_model, 'sizes/Tiny')
# +
import matplotlib.pyplot as plt
import numpy as np
prop_cycle = plt.rcParams['axes.prop_cycle']
COLOR_CYCLE = prop_cycle.by_key()['color']
def _smooth(values, std):
"""
    Smooths a list of values by convolving with a gaussian.
Assumes equal spacing.
Args:
values: A 1D array of values to smooth.
        std: The standard deviation of the gaussian. The units are array elements.
Returns:
The smoothed array.
"""
width = std * 4
x = np.linspace(-width, width, 2 * width + 1)
kernel = np.exp(-(x / 5)**2)
values = np.array(values)
weights = np.ones_like(values)
smoothed_values = np.convolve(values, kernel, mode='same')
smoothed_weights = np.convolve(weights, kernel, mode='same')
return smoothed_values / smoothed_weights
class HistoryPlotter(object):
"""
    A class for plotting a named set of keras-histories.
The class maintains colors for each key from plot to plot.
"""
def __init__(self, metric=None, smoothing_std=None):
self.color_table = {}
self.metric = metric
self.smoothing_std = smoothing_std
def plot(self, histories, metric=None, smoothing_std=None):
"""
Plots a {name: history} dictionary of keras histories.
Colors are assigned to the name-key, and maintained from call to call.
Training metrics are shown as a solid line, validation metrics dashed.
Args:
histories: {name: history} dictionary of keras histories.
metric: which metric to plot from all the histories.
            smoothing_std: the standard deviation of the smoothing kernel applied before plotting. The units are array indices.
"""
if metric is None:
metric = self.metric
if smoothing_std is None:
smoothing_std = self.smoothing_std
plt.figure(dpi=200)
for name, history in histories.items():
            # Remember name->color associations.
if name in self.color_table:
color = self.color_table[name]
else:
color = COLOR_CYCLE[len(self.color_table) % len(COLOR_CYCLE)]
self.color_table[name] = color
train_value = history.history[metric]
val_value = history.history['val_' + metric]
if smoothing_std is not None:
train_value = _smooth(train_value, std=smoothing_std)
val_value = _smooth(val_value, std=smoothing_std)
plt.plot(
history.epoch,
train_value,
color=color,
label=name.title() + ' Train')
plt.plot(
history.epoch,
val_value,
'--',
label=name.title() + ' Val',
color=color)
plt.xlabel('Epochs')
plt.ylabel(metric.replace('_', ' ').title())
plt.legend()
plt.xlim(
[0, max([history.epoch[-1] for name, history in histories.items()])])
plt.grid(True)
# -
plotter = HistoryPlotter(metric = 'binary_crossentropy', smoothing_std=10)
plotter.plot(size_histories)
plt.ylim([0.5, 0.7])
# +
small_model = tf.keras.Sequential([
# `input_shape` is only required here so that `.summary` works.
layers.Dense(16, activation='elu', input_shape=(FEATURES,)),
layers.Dense(16, activation='elu'),
layers.Dense(1)
])
size_histories['Small'] = compile_and_fit(small_model, 'sizes/Small')
# +
medium_model = tf.keras.Sequential([
layers.Dense(64, activation='elu', input_shape=(FEATURES,)),
layers.Dense(64, activation='elu'),
layers.Dense(64, activation='elu'),
layers.Dense(1)
])
size_histories['Medium'] = compile_and_fit(medium_model, "sizes/Medium")
# +
large_model = tf.keras.Sequential([
layers.Dense(512, activation='elu', input_shape=(FEATURES,)),
layers.Dense(512, activation='elu'),
layers.Dense(512, activation='elu'),
layers.Dense(512, activation='elu'),
layers.Dense(1)
])
size_histories['large'] = compile_and_fit(large_model, "sizes/large")
# -
size_histories
plotter.plot(size_histories)
a = plt.xscale('log')
plt.xlim([5, max(plt.xlim())])
plt.ylim([0.5, 0.7])
plt.xlabel("Epochs [Log Scale]")
regularizer_histories = {}
regularizer_histories['Tiny'] = size_histories['Tiny']
# +
l2_model = tf.keras.Sequential([
layers.Dense(512, activation='elu',
kernel_regularizer=regularizers.l2(0.001),
input_shape=(FEATURES,)),
layers.Dense(512, activation='elu',
kernel_regularizer=regularizers.l2(0.001)),
layers.Dense(512, activation='elu',
kernel_regularizer=regularizers.l2(0.001)),
layers.Dense(512, activation='elu',
kernel_regularizer=regularizers.l2(0.001)),
layers.Dense(1)
])
regularizer_histories['l2'] = compile_and_fit(l2_model, "regularizers/l2")
# -
plotter.plot(regularizer_histories)
plt.ylim([0.5, 0.7])
# +
dropout_model = tf.keras.Sequential([
layers.Dense(512, activation='elu', input_shape=(FEATURES,)),
layers.Dropout(0.5),
layers.Dense(512, activation='elu'),
layers.Dropout(0.5),
layers.Dense(512, activation='elu'),
layers.Dropout(0.5),
layers.Dense(512, activation='elu'),
layers.Dropout(0.5),
layers.Dense(1)
])
regularizer_histories['dropout'] = compile_and_fit(dropout_model, "regularizers/dropout")
# -
plotter.plot(regularizer_histories)
plt.ylim([0.5, 0.7])
# +
combined_model = tf.keras.Sequential([
layers.Dense(512, kernel_regularizer=regularizers.l2(0.0001),
activation='elu', input_shape=(FEATURES,)),
layers.Dropout(0.5),
layers.Dense(512, kernel_regularizer=regularizers.l2(0.0001),
activation='elu'),
layers.Dropout(0.5),
layers.Dense(512, kernel_regularizer=regularizers.l2(0.0001),
activation='elu'),
layers.Dropout(0.5),
layers.Dense(512, kernel_regularizer=regularizers.l2(0.0001),
activation='elu'),
layers.Dropout(0.5),
layers.Dense(1)
])
regularizer_histories['combined'] = compile_and_fit(combined_model, "regularizers/combined")
# -
plotter.plot(regularizer_histories)
plt.ylim([0.5, 0.7])
| Part I Basic Usage/04.Overfit and Underfit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python
# language: python3
# name: python3
# ---
# # Contents:
# - [Notation](pdf/pdf.html#notation)
# - [Model Ingredients and Assumptions (test case for brackets)](pdf/pdf.html#model-ingredients-and-assumptions-test-case-for-brackets)
# - [Dynamic Interpretation](pdf/pdf.html#dynamic-interpretation)
#
#
# [References](references.ipynb)
| tests/pdf/ipynb/index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Qiskit Runtime
# Qiskit Runtime is a new architecture offered by IBM Quantum that streamlines computations requiring many iterations. These experiments will execute significantly faster within this improved hybrid quantum/classical process.
#
# Using Qiskit Runtime, for example, a research team at IBM Quantum was able to achieve a 120x
# speedup in their lithium hydride simulation (link to come).
#
# Qiskit Runtime allows authorized users to upload their Qiskit quantum programs for themselves or
# others to use. A Qiskit quantum program, also called a Qiskit runtime program, is a piece of Python code that takes certain inputs, performs
# quantum and possibly classical computation, and returns the processing results. The same or other
# authorized users can then invoke these quantum programs by simply passing in the required input parameters.
# <div class="alert alert-block alert-info">
# <b>Note:</b> Qiskit Runtime is only available to select IBM Quantum providers. You can use the `has_service()` method to check if a provider has access:
# </div>
# +
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(project='qiskit-runtime') # Change this to your provider.
can_use_runtime = provider.has_service('runtime')
# -
#
# If you don't have an IBM Quantum account, you can sign up for one on the [IBM Quantum](https://quantum-computing.ibm.com/) page.
# ## Listing programs <a name='listing_program'>
# The `provider.runtime` object is an instance of the [`IBMRuntimeService`](https://qiskit.org/documentation/stubs/qiskit.providers.ibmq.runtime.IBMRuntimeService.html#qiskit.providers.ibmq.runtime.IBMRuntimeService) class and serves as the main entry point to using the runtime service. It has three methods that can be used to find metadata of available programs:
# - `pprint_programs()`: pretty prints metadata of all available programs
# - `programs()`: returns a list of `RuntimeProgram` instances
# - `program()`: returns a single `RuntimeProgram` instance
#
# The metadata of a runtime program includes its ID, name, description, version, input parameters, return values, interim results, maximum execution time, and backend requirements. Maximum execution time is the maximum amount of time, in seconds, a program can run before being forcibly terminated.
# To print the metadata of all available programs:
provider.runtime.pprint_programs()
# To print the metadata of the program `sample-program`:
program = provider.runtime.program('sample-program')
print(program)
# As you can see from above, the program `sample-program` is a simple program that has only 1 input parameter `iterations`, which indicates how many iterations to run. For each iteration it generates and runs a random 5-qubit circuit and returns the counts as well as the iteration number as the interim results. When the program finishes, it returns the sentence `All done!`. This program can only run for 300 seconds (5 minutes), and requires a backend that has at least 5 qubits.
# ## Invoking a runtime program <a name='invoking_program'>
# You can use the [`IBMRuntimeService.run()`](https://qiskit.org/documentation/stubs/qiskit.providers.ibmq.runtime.IBMRuntimeService.html#qiskit.providers.ibmq.runtime.IBMRuntimeService.run) method to invoke a runtime program. This method takes the following parameters:
#
# - `program_id`: ID of the program to run
# - `inputs`: Program input parameters. These input values are passed to the runtime program.
# - `options`: Runtime options. These options control the execution environment. Currently the only available option is `backend_name`, which is required.
# - `callback`: Callback function to be invoked for any interim results. The callback function will receive 2 positional parameters: job ID and interim result.
# - `result_decoder`: Optional class used to decode job result.
# Before we run a quantum program, we may want to define a callback function that would process interim results, which are intermediate data provided by a program while it is still running.
#
# As we saw earlier, the metadata of `sample-program` says that its interim results are the iteration number and the counts of the randomly generated circuit. Here we define a simple callback function that just prints these interim results:
def interim_result_callback(job_id, interim_result):
print(f"interim result: {interim_result}")
# The following example runs the `sample-program` program with 3 iterations on `ibmq_montreal` and waits for its result. You can also use a different backend that supports Qiskit Runtime:
backend = provider.get_backend('ibmq_montreal')
program_inputs = {
'iterations': 3
}
options = {'backend_name': backend.name()}
job = provider.runtime.run(program_id="sample-program",
options=options,
inputs=program_inputs,
callback=interim_result_callback
)
print(f"job id: {job.job_id()}")
result = job.result()
print(result)
# The `run()` method returns a [`RuntimeJob`](https://qiskit.org/documentation/stubs/qiskit.providers.ibmq.runtime.RuntimeJob.html#qiskit.providers.ibmq.runtime.RuntimeJob) instance, which is similar to the `Job` instance returned by regular `backend.run()`. `RuntimeJob` supports the following methods:
#
# - `status()`: Return job status.
# - `result()`: Wait for the job to finish and return the final result.
# - `cancel()`: Cancel the job.
# - `wait_for_final_state()`: Wait for the job to finish.
# - `stream_results()`: Stream interim results. This can be used to start streaming the interim results if a `callback` function was not passed to the `run()` method. This method can also be used to reconnect a lost websocket connection.
# - `job_id()`: Return the job ID.
# - `backend()`: Return the backend where the job is run.
# - `logs()`: Return job logs.
# - `error_message()`: Returns the reason if the job failed and `None` otherwise.
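# As a small illustrative sketch (not part of the original tutorial), a few of the methods listed above could be called on the `job` object from the earlier run:
# +
# Illustrative sketch only: inspect the runtime job using the methods listed above
print(job.status())           # current status of the job
job.wait_for_final_state()    # block until the job reaches a terminal state
print(job.logs())             # job logs, useful for debugging a runtime program
if job.error_message():       # error_message() returns None when the job succeeded
    print(job.error_message())
# -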
# ## Retrieving old jobs
# You can use the [`IBMRuntimeService.job()`](https://qiskit.org/documentation/stubs/qiskit.providers.ibmq.runtime.IBMRuntimeService.html#qiskit.providers.ibmq.runtime.IBMRuntimeService.job) method to retrieve a previously executed runtime job. Attributes of this [`RuntimeJob`](https://qiskit.org/documentation/stubs/qiskit.providers.ibmq.runtime.RuntimeJob.html#qiskit.providers.ibmq.runtime.RuntimeJob) instance can tell you about the execution:
retrieved_job = provider.runtime.job(job.job_id())
print(f"Job {retrieved_job.job_id()} is an execution instance of runtime program {retrieved_job.program_id}.")
print(f"This job ran on backend {retrieved_job.backend()} and had input parameters {retrieved_job.inputs}")
# Similarly, you can use [`IBMRuntimeService.jobs()`](https://qiskit.org/documentation/stubs/qiskit.providers.ibmq.runtime.IBMRuntimeService.html#qiskit.providers.ibmq.runtime.IBMRuntimeService.jobs) to get a list of jobs. You can specify a limit on how many jobs to return. The default limit is 10:
retrieved_jobs = provider.runtime.jobs(limit=1)
for rjob in retrieved_jobs:
print(rjob.job_id())
# ## Deleting a job
# You can use the [`IBMRuntimeService.delete_job()`](https://qiskit.org/documentation/stubs/qiskit.providers.ibmq.runtime.IBMRuntimeService.html#qiskit.providers.ibmq.runtime.IBMRuntimeService.delete_job) method to delete a job. You can only delete your own jobs, and this action cannot be reversed.
provider.runtime.delete_job(job.job_id())
import qiskit.tools.jupyter
# %qiskit_version_table
| tutorials/00_introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: TF
# language: python
# name: tf
# ---
import clean_data_svi as cds
import supervised as sup
import itertools
import pathlib
import matplotlib.pyplot as plt
import keras_model as km
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt
from datetime import timedelta, datetime
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from tqdm import tqdm
import tensorflow as tf
from keras import backend as K, Sequential, Input, Model
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Activation, Dense, LSTM, Bidirectional, Conv1D, MaxPooling1D, MaxPooling2D, Flatten, \
TimeDistributed, RepeatVector, Dropout, GRU, AveragePooling1D
from keras.optimizers import Adam
from keras.metrics import categorical_crossentropy
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.metrics import mean_squared_error, median_absolute_error, roc_curve, auc, f1_score, \
precision_recall_curve, r2_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix, ConfusionMatrixDisplay
import imblearn
from imblearn import under_sampling, over_sampling
from imblearn.over_sampling import RandomOverSampler, SMOTE, ADASYN, BorderlineSMOTE, SVMSMOTE, KMeansSMOTE
# +
#useful functions
# -
def threshold_for_max_f1(y_real, Yhat):
'''
    Given the true labels y_real and the predicted probabilities Yhat, return
    the threshold (rounded to the nearest hundredth) that maximizes f1.
    Note: this function is not necessarily optimized; that could be revisited
    later, but it is not needed for now.
    Also note that we calculate f1 without using the results method in
    keras_model. This is because we need to check beforehand that computing
    f1 won't produce a NaN, so we won't get an invalid value warning.
'''
    # an error occurs in km.results when computing f1 because TNR and NPV are 0, implying that there
    # are no true negatives. While we added if statements to account for at least one predicted
    # negative, that does not guarantee at least one true negative. Hence, instead of using km.results,
    # we reuse its code here and explicitly check that tn is not 0.
f1_vals = []
for i in range(0, 100):
threshold = i/100
y_predict = np.where(Yhat > threshold, 1, 0).astype(int)
cm = confusion_matrix(y_real, y_predict)
tn, fp, fn, tp = confusion_matrix(y_real, y_predict).ravel()
if tn != 0:
TNR = (tn) / (tn + fp)
NPV = (tn) / (tn + fn)
f1 = 2 * (TNR * NPV) / (TNR + NPV)
else:
f1 = -2
f1_vals.append(f1)
f1_vals = np.array(f1_vals)
f1_vals = np.nan_to_num(f1_vals, nan=-1)
    return (np.argmax(f1_vals))/100  # return the threshold (index/100) that produced the maximum f1
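# Note on the metric above: the `f1` being maximized is the F1 score of the class encoded as 0,
# built from the true-negative rate $TNR = \frac{tn}{tn+fp}$ and the negative predictive value
# $NPV = \frac{tn}{tn+fn}$, i.e. $F_1 = 2\,\frac{TNR \cdot NPV}{TNR + NPV}$.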
def plot_cf_matrix(y_real, y_predict):
'''
Given y_real and y_predict, this method displays the results
    (accuracy, TNR, NPV, f1) followed by the plot of the confusion matrix.
'''
print('results:', km.results(y_real, y_predict), '\n')
classes = ['High_svi', 'Low_svi']
cm = confusion_matrix(y_real, y_predict)
disp = sup.plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues)
plt.show(disp)
def over_sample(X, Y, sampler):
'''
This function over samples data X and labels Y. Because each piece of data is 2D (hence X is 3D),
we need to do some clever resizing since the imblearn Oversampling functions only work with a 2D X.
'''
sampler = sampler(random_state=0)
orig_X_shape = X.shape
X_reshaped = np.reshape(X, (X.shape[0], X.shape[1]*X.shape[2]))
X_resampled, Y_resampled = sampler.fit_resample(X_reshaped, Y)
X_resampled = np.reshape(X_resampled, (X_resampled.shape[0], orig_X_shape[1], orig_X_shape[2]))
return X_resampled, Y_resampled
# +
#compiling data
# -
SVI_list = []
for i in range(4):
df = pd.read_csv(f"clean_tables/svi_{i+1}.csv", index_col="date")
df = df.drop(columns=['Settling_velocity', 'SV_label', 'SVI_label'])
df.index = pd.to_datetime(df.index, dayfirst=True)
SVI_list.append(df)
temp_df = pd.read_csv("clean_tables/temperatur.csv", index_col="date")
temp_df.index = pd.to_datetime(temp_df.index, dayfirst=True)
sludge_age_df = pd.read_csv("clean_tables/sludge_age_f_m.csv", index_col="date")
sludge_age_df.index = pd.to_datetime(sludge_age_df.index, dayfirst=True)
reactor_list = []
for i in range(4):
join = pd.concat([SVI_list[i], temp_df], axis=1)
if i <=1:
join = pd.concat([join, sludge_age_df.iloc[:, np.r_[0, 2]]], axis=1)
else:
join = pd.concat([join, sludge_age_df.iloc[:, np.r_[1, 3]]], axis=1)
join.columns = ['SVI', 'Temperature', 'F_M', 'Sludge Age']
reactor_list.append(join)
reactor_list[1]
micro_list = []
for i in range(4):
df = pd.read_csv(f"clean_tables/micro_{i+1}.csv", index_col="date")
df.index = pd.to_datetime(df.index, dayfirst=True)
micro_list.append(df)
micro_list[0]
join_list = []
for i in range(4):
join = pd.concat([reactor_list[i], micro_list[i]], axis=1)
join_list.append(join)
join_list[0]
# +
#model without resampling
# -
X, Y = km.create_join_x_y_arr(join_list, n_steps_in=7, binary=True)
X_normalize, Y_normalize, scalers = km.normalize(X, Y)
X_normalize = np.nan_to_num(X_normalize, nan=-1)
# +
Xtrain, Xtest, ytrain, ytest = train_test_split(X_normalize, Y_normalize, test_size=0.10, random_state=42)
model = Sequential()
model.add(LSTM(units=50, activation='relu', name='first_lstm', recurrent_dropout=0.1, input_shape=(Xtrain.shape[1], Xtrain.shape[2])))
model.add(Dense(25, activation='relu'))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer='adam', loss='binary_crossentropy',
metrics=[keras.metrics.BinaryAccuracy(name='binary_accuracy', dtype=None, threshold=0.5)])
model.fit(Xtrain, ytrain, epochs=3, batch_size=10, shuffle=True)
Yhat, Ytest = km.evaluate(model, Xtest, ytest, scalers, binary=True)
y_real = Ytest.astype(int)
threshold = threshold_for_max_f1(y_real, Yhat)
y_predict = np.where(Yhat > threshold, 1, 0).astype(int)
# -
plot_cf_matrix(y_real, y_predict)
# +
#model with resampling
# -
X, Y = km.create_join_x_y_arr(join_list, n_steps_in=7, binary=True)
X_normalize, Y_normalize, scalers = km.normalize(X, Y)
X_normalize = np.nan_to_num(X_normalize, nan=-1)
# X_normalize, Y_normalize = over_sample(X_normalize, Y_normalize)
# samplers_available = [RandomOverSampler, SMOTE, ADASYN, BorderlineSMOTE, SVMSMOTE]
samplers = [SMOTE]
results_list = []
for sampler in samplers:
Xtrain, Xtest, ytrain, ytest = train_test_split(X_normalize, Y_normalize, test_size=0.10, random_state=42)
Xtrain, ytrain = over_sample(Xtrain, ytrain, sampler)
model = Sequential()
model.add(LSTM(units=50, activation='relu', name='first_lstm', recurrent_dropout=0.1, input_shape=(Xtrain.shape[1], Xtrain.shape[2])))
model.add(Dense(25, activation='relu'))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer='adam', loss='binary_crossentropy',
metrics=[keras.metrics.BinaryAccuracy(name='binary_accuracy', dtype=None, threshold=0.5)])
model.fit(Xtrain, ytrain, epochs=6, batch_size=10, shuffle=True)
Yhat, Ytest = km.evaluate(model, Xtest, ytest, scalers, binary=True)
y_real = Ytest.astype(int)
threshold = threshold_for_max_f1(y_real, Yhat)
y_predict = np.where(Yhat > threshold, 1, 0).astype(int)
print(km.results(y_real, y_predict))
results_list.append(km.results(y_real, y_predict))
# +
Xtrain, Xtest, ytrain, ytest, scalers = get_Xtrain_Xtest_ytrain_ytest_scalers(7, 1, resample)
samplers = [SMOTE]
results_list = []
for sampler in samplers:
model = Sequential()
model.add(LSTM(units=50, activation='relu', name='first_lstm', recurrent_dropout=0.1, input_shape=(Xtrain.shape[1], Xtrain.shape[2])))
model.add(Dense(25, activation='relu'))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer='adam', loss='binary_crossentropy',
metrics=[keras.metrics.BinaryAccuracy(name='binary_accuracy', dtype=None, threshold=0.5)])
model.fit(Xtrain, ytrain, epochs=6, batch_size=10, shuffle=True)
Yhat, Ytest = km.evaluate(model, Xtest, ytest, scalers, binary=True)
y_real = Ytest.astype(int)
threshold = threshold_for_max_f1(y_real, Yhat)
y_predict = np.where(Yhat > threshold, 1, 0).astype(int)
print(km.results(y_real, y_predict))
results_list.append(km.results(y_real, y_predict))
# -
plot_cf_matrix(y_real, y_predict)
res = []
# +
def results_from_training_model(j = 50, k = 25, l = 4, m = 10, resample = SMOTE, recurrent_dropout = 0.1, threshold = 0.85, epochs = 6, batch_size = 10):
Xtrain, Xtest, ytrain, ytest, scalers = get_Xtrain_Xtest_ytrain_ytest_scalers(7, 1, resample)
model = Sequential()
model.add(LSTM(units=j, activation='relu', name='first_lstm', recurrent_dropout=recurrent_dropout, input_shape=(Xtrain.shape[1], Xtrain.shape[2])))
model.add(Dense(k, activation='relu'))
model.add(Dense(m, activation='relu'))
model.add(Dense(l, activation='relu'))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer='adam', loss='binary_crossentropy',
metrics=[keras.metrics.BinaryAccuracy(name='binary_accuracy', dtype=None, threshold=threshold)])
model.fit(Xtrain, ytrain, epochs=epochs, batch_size=batch_size, shuffle=True)
X_fit_threshold, Xtest, y_fit_threshold, ytest = train_test_split(Xtest, ytest, test_size=0.67, random_state=42)
Yhat, Ytest = km.evaluate(model, X_fit_threshold, y_fit_threshold, scalers, binary=True)
y_real = Ytest.astype(int)
threshold = threshold_for_max_f1(y_real, Yhat)
y_predict = np.where(Yhat > threshold, 1, 0).astype(int)
Yhat, Ytest = km.evaluate(model, Xtest, ytest, scalers, binary=True)
y_real = Ytest.astype(int)
# threshold = threshold_for_max_f1(y_real, Yhat)
y_predict = np.where(Yhat > threshold, 1, 0).astype(int)
return km.results(y_real, y_predict)
# res.append(results_from_training_model(epochs = 6, l = 5))
# res.append(results_from_training_model(epochs = 6, l = 6))
# res.append(results_from_training_model(epochs = 6, l = 6, m = 12))
# res.append(results_from_training_model(epochs = 6, l = 5, m = 11))
# res.append(results_from_training_model(epochs = 6, l = 5, m = 9))
# res.append(results_from_training_model(epochs = 6, l = 4))
# res.append(results_from_training_model(epochs = 6, k = 24))
# res.append(results_from_training_model(epochs = 6, k = 26))
# res.append(results_from_training_model(epochs = 6, k = 30))
# res.append(results_from_training_model(epochs = 6, k = 28))
# res.append(results_from_training_model(epochs = 6, k = 27))
# res.append(results_from_training_model(epochs = 6, k = 26))
res.append(results_from_training_model(epochs = 6, k = 25))
res.append(results_from_training_model(epochs = 6, k = 26))
# res.append(results_from_training_model(epochs = 6, l = 6, m = 12))
[x[3] for x in res]
# -
results_list
results_list #50, 100, units, 0.85 threshold, 10 batch size, 1/5 train data
results_list #50, 100, units, 0.85 threshold, 10 batch size, 1/5 data, 0.2 train data, 0.8 test data
results_list #50, 100, units, 0.85 threshold, 10 batch size, 0.2 train data, 0.8 test data
results_list #50, 100, units, 0.85 threshold, 10 batch size, 1/5 train data
results_list #50, 100, units, 0.85 threshold, 10 batch size, 1/5 train data
results_list #50, 100, units, 0.85 threshold, 10 batch size
results_list #50, 100, units, 0.85 threshold, 12 batch size
results_list #50, 100, units, 0.85 threshold, 8 batch size
results_list #50, 100, units, 0.90 threshold, 10 batch size
results_list #70, 140, units, 0.80 threshold
results_list #50, 100, units, 0.80 threshold
results_list #30, 60, units, 0.80 threshold
results_list #25, 50, units, 0.80 threshold
results_list
results_list
results_list
results_list
results_list
results_list
results_list
[x[3] for x in results_list]
plot_cf_matrix(y_real, y_predict)
plot_cf_matrix(y_real, y_predict)
# +
#extra metrics
# +
# fpr, tpr, thresholds = roc_curve(Ytest, Yhat)
# +
# auc(fpr, tpr)
# +
# plt.plot(fpr, tpr)
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
# plt.title('ROC curve')
# plt.show()
# -
# +
#general model training architecture
# +
model_names = {1: 'Simple LSTM',
2: 'Stacked LSTM',
3: 'Bidirectional LSTM',
4: 'CNN',
5: 'CNN LSTM',
6: 'LSTM Autoencoder',
7: 'Deep CNN',
8: 'GRU',
9: 'GRU CNN'}
def plot_graphs_metrics(model, results_list, steps_in, steps_out):
model_name = model_names[model]
    # this block is needed because some models (like model 6) require steps_in to start at 3 instead of 1
shift_vals = {1: 1,
3: 1,
6: 3,
9: 2}
shift_val = shift_vals[model]
#plot graph of a metric result for all n_step_in and n_step_out values
x=list(range(1, steps_out))
label = ['accuracy', 'TNR', 'NPV', 'f1']
for z in range(4):
for i in range(steps_in-shift_val):
y=[]
for j in range(steps_out-1):
y.append(results_list[i*(steps_out-1):i*(steps_out-1) + (steps_out-1)][j][z])
plt.plot(x, y, label=f'n_steps_in={i+shift_val}')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("n_steps_out")
plt.ylabel(label[z])
plt.title(model_name + ", 3 layers (50,25,1),\n name='first_lstm', recurrent_dropout=0.1 \n optimizer='adam', loss='binary_crossentropy' ")
plt.savefig(f"figures/{model_name} {label[z]}.png", bbox_inches="tight")
plt.close()
#plot graph of all metric results for a n_step_in value
x=list(range(1, steps_out))
label = ['accuracy', 'TNR', 'NPV', 'f1']
for z in range(steps_in-shift_val):
for i in range(4):
y=[]
for j in range(steps_out-1):
y.append(results_list[z*(steps_out-1):z*(steps_out-1)+(steps_out-1)][j][i])
plt.plot(x, y, label=label[i])
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("n_steps_out")
plt.ylabel('Metric value')
plt.title(f"{model_name}, 3 layers (50,25,1),\n name='first_lstm', recurrent_dropout=0.1 \n optimizer='adam', loss='binary_crossentropy' \n n_steps_in={z+shift_val} ")
plt.savefig(f"figures/{model_name} n_steps_in={z+shift_val}.png", bbox_inches="tight")
plt.close()
# +
#Helper functions
def get_Xtrain_Xtest_ytrain_ytest_scalers(i, j, resample):
'''
    Returns (Xtrain, Xtest, ytrain, ytest, scalers) based on the number of steps in, the number of
    steps out, and the parameter specifying which resampler to use (or None).
'''
X, Y = km.create_join_x_y_arr(reactor_list, n_steps_in=i, n_steps_out = j, binary=True)
X_normalize, Y_normalize, scalers = km.normalize(X, Y)
X_normalize = np.nan_to_num(X_normalize, nan=-1)
    ### optional: reduce the dataset to the test-size fraction (0.2 at the moment); currently commented out
# Xtrash, X_normalize, Ytrash, Y_normalize = train_test_split(X_normalize, Y_normalize, test_size=0.20, random_state=42)
### reduces size of dataset
Xtrain, Xtest, ytrain, ytest = train_test_split(X_normalize, Y_normalize, test_size=0.30, random_state=42)
if resample != None:
Xtrain, ytrain = over_sample(Xtrain, ytrain, resample)
return Xtrain, Xtest, ytrain, ytest, scalers
#Model type 1
def train_SIMPLE_LSTM_model(epochs, steps_in, steps_out, resample):
results_list = []
for i in tqdm(range(7, steps_in)):
for j in tqdm(range(1, steps_out)):
Xtrain, Xtest, ytrain, ytest, scalers = get_Xtrain_Xtest_ytrain_ytest_scalers(i, j, resample)
features = Xtrain.shape[2]
model = Sequential()
model.add(LSTM(units=50, activation='relu', name='first_lstm', recurrent_dropout=0.1, input_shape=(Xtrain.shape[1], Xtrain.shape[2])))
model.add(Dense(25, activation='relu'))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer=Adam(learning_rate=0.0005), loss='binary_crossentropy')
# model.compile(optimizer='adam', loss='binary_crossentropy',
# metrics=[keras.metrics.BinaryAccuracy(name='binary_accuracy', dtype=None, threshold=0.5)])
model.fit(Xtrain, ytrain, epochs=epochs, batch_size=10, shuffle=True)
Yhat, Ytest = km.evaluate(model, Xtest, ytest, scalers, binary=True)
y_real = Ytest.astype(int)
threshold = threshold_for_max_f1(y_real, Yhat)
y_predict = np.where(Yhat > threshold, 1, 0).astype(int)
results_list.append(km.results(y_real, y_predict))
print('inputs', (epochs, steps_in, steps_out), 'outputs', km.results(y_real, y_predict))
return results_list
#Model type 3
def train_BIDIRECTIONAL_LSTM_model(epochs, steps_in, steps_out, resample):
results_list = []
for i in range(1, steps_in):
for j in range(1, steps_out):
            Xtrain, Xtest, ytrain, ytest, scalers = get_Xtrain_Xtest_ytrain_ytest_scalers(i, j, resample)
features = Xtrain.shape[2]
model = Sequential()
model.add(Bidirectional(LSTM(100, return_sequences=True, activation='relu')))
model.add(Bidirectional(LSTM(50, return_sequences=True, activation='relu')))
model.add(Bidirectional(LSTM(20, activation='relu')))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer=Adam(learning_rate=0.0001), loss='binary_crossentropy')
model.fit(Xtrain, ytrain, epochs=epochs, batch_size=10, shuffle=True)
Yhat, Ytest = km.evaluate(model, Xtest, ytest, scalers, binary=True)
y_real = Ytest.astype(int)
threshold = threshold_for_max_f1(y_real, Yhat)
y_predict = np.where(Yhat > threshold, 1, 0).astype(int)
results_list.append(km.results(y_real, y_predict))
return results_list
#Model type 6
#For some reason we need steps_in to be at least 3 for this one
#This probably has to do with the pooling, Convolution, or Dropout layers
def train_LSTM_AUTOENCODER_model(epochs, steps_in, steps_out, resample):
results_list = []
for i in range(3, steps_in):
for j in range(1, steps_out):
            Xtrain, Xtest, ytrain, ytest, scalers = get_Xtrain_Xtest_ytrain_ytest_scalers(i, j, resample)
features = Xtrain.shape[2]
model = Sequential()
model.add(Conv1D(filters=128,
kernel_size=2,
activation='relu',
name='extractor',
input_shape=(Xtrain.shape[1], Xtrain.shape[2])))
model.add(Dropout(0.3))
model.add(MaxPooling1D(pool_size=2))
model.add(Bidirectional(LSTM(50, activation='relu', input_shape=(Xtrain.shape[1], Xtrain.shape[2]))))
model.add(RepeatVector(10))
model.add(Bidirectional(LSTM(50, activation='relu')))
model.add(Dense(1))
model.compile(optimizer=Adam(learning_rate=0.0001), loss='binary_crossentropy')
model.fit(Xtrain, ytrain, epochs=epochs, batch_size=10, shuffle=True)
Yhat, Ytest = km.evaluate(model, Xtest, ytest, scalers, binary=True)
y_real = Ytest.astype(int)
threshold = threshold_for_max_f1(y_real, Yhat)
y_predict = np.where(Yhat > threshold, 1, 0).astype(int)
results_list.append(km.results(y_real, y_predict))
return results_list
#Model type 9
#steps_in must be at least 2 for this one.
def train_GRU_CNN_model(epochs, steps_in, steps_out, resample):
results_list = []
for i in range(2, steps_in):
for j in range(1, steps_out):
            Xtrain, Xtest, ytrain, ytest, scalers = get_Xtrain_Xtest_ytrain_ytest_scalers(i, j, resample)
features = Xtrain.shape[2]
inp_seq = Input(shape=(Xtrain.shape[1], Xtrain.shape[2]))
x = Bidirectional(GRU(100, return_sequences=True))(inp_seq)
x = AveragePooling1D(2)(x)
x = Conv1D(100, 3, activation='relu', padding='same',
name='extractor')(x)
x = Flatten()(x)
x = Dense(16, activation='relu')(x)
x = Dropout(0.5)(x)
out = Dense(1, activation="sigmoid")(x)
model = Model(inp_seq, out)
model.compile(optimizer=Adam(learning_rate=0.0001), loss='binary_crossentropy')
model.fit(Xtrain, ytrain, epochs=epochs, batch_size=10, shuffle=True)
Yhat, Ytest = km.evaluate(model, Xtest, ytest, scalers, binary=True)
y_real = Ytest.astype(int)
threshold = threshold_for_max_f1(y_real, Yhat)
y_predict = np.where(Yhat > threshold, 1, 0).astype(int)
results_list.append(km.results(y_real, y_predict))
return results_list
# +
#Code to run models
list_of_result_lists = {}
models_list = [1]
epochs = 6
steps_in = 7
steps_out = 1
resample = SMOTE
steps_in += 1
steps_out += 1
for m in models_list:
if m == 1:
results_list = train_SIMPLE_LSTM_model(epochs, steps_in, steps_out, resample)
elif m == 3:
results_list = train_BIDIRECTIONAL_LSTM_model(epochs, steps_in, steps_out, resample)
elif m == 6:
results_list = train_LSTM_AUTOENCODER_model(epochs, steps_in, steps_out, resample)
elif m == 9:
results_list = train_GRU_CNN_model(epochs, steps_in, steps_out, resample)
list_of_result_lists[m] = results_list
plot_graphs_metrics(m, results_list, steps_in, steps_out)
# -
results_list
results_list
list_of_result_lists
# +
# list_of_result_lists[6]
| Join all tables and run LSTM model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# # Title: IP Explorer
# <details>
# <summary> <u>Details...</u></summary>
#
# **Notebook Version:** 1.0<br>
# **Python Version:** Python 3.7 (including Python 3.6 - AzureML)<br>
# **Required Packages**: kqlmagic, msticpy, pandas, numpy, matplotlib, networkx, ipywidgets, ipython, scikit_learn, dnspython, ipwhois, folium, holoviews<br>
# **Platforms Supported**:
# - Azure Notebooks Free Compute
# - Azure Notebooks DSVM
# - OS Independent
#
# **Data Sources Required**:
# - Log Analytics
# - Heartbeat
# - SecurityAlert
# - SecurityEvent
# - AzureNetworkAnalytics_CL
#
# - (Optional)
# - VirusTotal (with API key)
# - Alienvault OTX (with API key)
# - IBM Xforce (with API key)
# - CommonSecurityLog
# </details>
#
#
# Brings together a series of queries and visualizations to help you assess the security state of an IP address. It works with both internal addresses and public addresses.
# <br> For internal addresses it focuses on traffic patterns and behavior of the host using that IP address.
# <br> For public IPs it lets you perform threat intelligence lookups, passive dns, whois and other checks.
# <br>It also allows you to examine any network traffic between the external IP address and your resources.
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Hunting-Hypothesis" data-toc-modified-id="Hunting-Hypothesis-1"><span class="toc-item-num">1 </span>Hunting Hypothesis</a></span><ul class="toc-item"><li><span><a href="#IP-Explorer-Mindmap" data-toc-modified-id="IP-Explorer-Mindmap-1.1"><span class="toc-item-num">1.1 </span>IP Explorer Mindmap</a></span></li><li><span><a href="#Notebook-initialization" data-toc-modified-id="Notebook-initialization-1.2"><span class="toc-item-num">1.2 </span>Notebook initialization</a></span></li><li><span><a href="#Get-WorkspaceId-and-Authenticate-to-Log-Analytics" data-toc-modified-id="Get-WorkspaceId-and-Authenticate-to-Log-Analytics-1.3"><span class="toc-item-num">1.3 </span>Get WorkspaceId and Authenticate to Log Analytics</a></span></li></ul></li><li><span><a href="#Enter-the-IP-Address-and-query-time-window" data-toc-modified-id="Enter-the-IP-Address-and-query-time-window-2"><span class="toc-item-num">2 </span>Enter the IP Address and query time window</a></span></li><li><span><a href="#Determine-IP-Address-Type" data-toc-modified-id="Determine-IP-Address-Type-3"><span class="toc-item-num">3 </span>Determine IP Address Type</a></span></li><li><span><a href="#External-IP" data-toc-modified-id="External-IP-4"><span class="toc-item-num">4 </span>External IP</a></span><ul class="toc-item"><li><span><a href="#GeoIP-Lookups-for-External-IP-Addresses" data-toc-modified-id="GeoIP-Lookups-for-External-IP-Addresses-4.1"><span class="toc-item-num">4.1 </span>GeoIP Lookups for External IP Addresses</a></span></li><li><span><a href="#Whois-Registrars-for-External-IP-Addresses" data-toc-modified-id="Whois-Registrars-for-External-IP-Addresses-4.2"><span class="toc-item-num">4.2 </span>Whois Registrars for External IP Addresses</a></span></li><li><span><a href="#Opensource-and-Azure-Sentinel-ThreatIntel-Lookups" data-toc-modified-id="Opensource-and-Azure-Sentinel-ThreatIntel-Lookups-4.3"><span class="toc-item-num">4.3 </span>Opensource and Azure Sentinel ThreatIntel Lookups</a></span><ul class="toc-item"><li><span><a href="#Configure-your-TI-Provider-settings" data-toc-modified-id="Configure-your-TI-Provider-settings-4.3.1"><span class="toc-item-num">4.3.1 </span>Configure your TI Provider settings</a></span></li></ul></li><li><span><a href="#Passive-DNS-lookups-for-External-IP-Addresses" data-toc-modified-id="Passive-DNS-lookups-for-External-IP-Addresses-4.4"><span class="toc-item-num">4.4 </span>Passive DNS lookups for External IP Addresses</a></span></li></ul></li><li><span><a href="#Internal-IP-Address" data-toc-modified-id="Internal-IP-Address-5"><span class="toc-item-num">5 </span>Internal IP Address</a></span><ul class="toc-item"><li><span><a href="#Data-Sources-available-to-query-related-to-IP" data-toc-modified-id="Data-Sources-available-to-query-related-to-IP-5.1"><span class="toc-item-num">5.1 </span>Data Sources available to query related to IP</a></span></li><li><span><a href="#Check-if-IP-is-assigned-to-multiple-hostnames" data-toc-modified-id="Check-if-IP-is-assigned-to-multiple-hostnames-5.2"><span class="toc-item-num">5.2 </span>Check if IP is assigned to multiple hostnames</a></span></li><li><span><a href="#System-Info" data-toc-modified-id="System-Info-5.3"><span class="toc-item-num">5.3 </span>System Info</a></span></li><li><span><a href="#ServiceMap---Get-List-of-Services-for-Host" data-toc-modified-id="ServiceMap---Get-List-of-Services-for-Host-5.4"><span class="toc-item-num">5.4 </span>ServiceMap - Get List of Services for 
Host</a></span></li></ul></li><li><span><a href="#Related-Alerts" data-toc-modified-id="Related-Alerts-6"><span class="toc-item-num">6 </span>Related Alerts</a></span><ul class="toc-item"><li><span><a href="#Visualization---Timeline-of-Related-Alerts" data-toc-modified-id="Visualization---Timeline-of-Related-Alerts-6.1"><span class="toc-item-num">6.1 </span>Visualization - Timeline of Related Alerts</a></span></li><li><span><a href="#Browse-List-of-Related-Alerts" data-toc-modified-id="Browse-List-of-Related-Alerts-6.2"><span class="toc-item-num">6.2 </span>Browse List of Related Alerts</a></span></li></ul></li><li><span><a href="#Related-Hosts" data-toc-modified-id="Related-Hosts-7"><span class="toc-item-num">7 </span>Related Hosts</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#NOTE---the-following-sections-are-only-relevant-for-Internal-IP-Addresses." data-toc-modified-id="NOTE---the-following-sections-are-only-relevant-for-Internal-IP-Addresses.-7.0.1"><span class="toc-item-num">7.0.1 </span><strong>NOTE - the following sections are only relevant for Internal IP Addresses.</strong></a></span></li></ul></li><li><span><a href="#Visualization---Networkx-Graph" data-toc-modified-id="Visualization---Networkx-Graph-7.1"><span class="toc-item-num">7.1 </span>Visualization - Networkx Graph</a></span></li></ul></li><li><span><a href="#Related-Accounts" data-toc-modified-id="Related-Accounts-8"><span class="toc-item-num">8 </span>Related Accounts</a></span><ul class="toc-item"><li><span><a href="#Visualization---Networkx-Graph" data-toc-modified-id="Visualization---Networkx-Graph-8.1"><span class="toc-item-num">8.1 </span>Visualization - Networkx Graph</a></span></li></ul></li><li><span><a href="#Logon-Summary-for-Related-Entities" data-toc-modified-id="Logon-Summary-for-Related-Entities-9"><span class="toc-item-num">9 </span>Logon Summary for Related Entities</a></span><ul class="toc-item"><li><span><a href="#HeatMap-for-Weekly-failed-logons" data-toc-modified-id="HeatMap-for-Weekly-failed-logons-9.1"><span class="toc-item-num">9.1 </span>HeatMap for Weekly failed logons</a></span></li><li><span><a href="#Host-Logons-Timeline" data-toc-modified-id="Host-Logons-Timeline-9.2"><span class="toc-item-num">9.2 </span>Host Logons Timeline</a></span></li><li><span><a href="#Failed-Logons-Timeline" data-toc-modified-id="Failed-Logons-Timeline-9.3"><span class="toc-item-num">9.3 </span>Failed Logons Timeline</a></span></li></ul></li><li><span><a href="#Network-Connection-Analysis" data-toc-modified-id="Network-Connection-Analysis-10"><span class="toc-item-num">10 </span>Network Connection Analysis</a></span><ul class="toc-item"><li><span><a href="#Network-Check-Communications-with-Other-Hosts" data-toc-modified-id="Network-Check-Communications-with-Other-Hosts-10.1"><span class="toc-item-num">10.1 </span>Network Check Communications with Other Hosts</a></span></li><li><span><a href="#Query-Flows-by-IP-Address" data-toc-modified-id="Query-Flows-by-IP-Address-10.2"><span class="toc-item-num">10.2 </span>Query Flows by IP Address</a></span></li><li><span><a href="#Bulk-whois-lookup" data-toc-modified-id="Bulk-whois-lookup-10.3"><span class="toc-item-num">10.3 </span>Bulk whois lookup</a></span></li><li><span><a href="#Choose-ASNs/IPs-to-Check-for-Threat-Intel-Reports" data-toc-modified-id="Choose-ASNs/IPs-to-Check-for-Threat-Intel-Reports-10.4"><span class="toc-item-num">10.4 </span>Choose ASNs/IPs to Check for Threat Intel Reports</a></span></li><li><span><a 
href="#GeoIP-Map-of-External-IPs" data-toc-modified-id="GeoIP-Map-of-External-IPs-10.5"><span class="toc-item-num">10.5 </span>GeoIP Map of External IPs</a></span></li><li><span><a href="#Outbound-Data-transfer-Time-Series-Anomalies" data-toc-modified-id="Outbound-Data-transfer-Time-Series-Anomalies-10.6"><span class="toc-item-num">10.6 </span>Outbound Data transfer Time Series Anomalies</a></span></li></ul></li><li><span><a href="#Conclusion" data-toc-modified-id="Conclusion-11"><span class="toc-item-num">11 </span>Conclusion</a></span><ul class="toc-item"><li><span><a href="#List-of-Suspicious-Activities/-Observables/Hunting-bookmarks" data-toc-modified-id="List-of-Suspicious-Activities/-Observables/Hunting-bookmarks-11.1"><span class="toc-item-num">11.1 </span>List of Suspicious Activities/ Observables/Hunting bookmarks</a></span></li></ul></li><li><span><a href="#Appendices" data-toc-modified-id="Appendices-12"><span class="toc-item-num">12 </span>Appendices</a></span></li><li><span><a href="#Available-DataFrames" data-toc-modified-id="Available-DataFrames-13"><span class="toc-item-num">13 </span>Available DataFrames</a></span><ul class="toc-item"><li><span><a href="#Saving-Data-to-Excel" data-toc-modified-id="Saving-Data-to-Excel-13.1"><span class="toc-item-num">13.1 </span>Saving Data to Excel</a></span></li></ul></li><li><span><a href="#Configuration" data-toc-modified-id="Configuration-14"><span class="toc-item-num">14 </span>Configuration</a></span><ul class="toc-item"><li><span><a href="#msticpyconfig.yaml-configuration-File" data-toc-modified-id="msticpyconfig.yaml-configuration-File-14.1"><span class="toc-item-num">14.1 </span><code>msticpyconfig.yaml</code> configuration File</a></span></li></ul></li></ul></div>
# -
# <a></a>[Contents](#toc)
# ## Hunting Hypothesis
# Our broad initial hunting hypothesis is that we have received an IP address entity that is either a suspected compromised internal host or an external public address with which internal hosts are communicating in a malicious manner. We will need to hunt from a range of different positions to validate or disprove this hypothesis.
#
# <a></a>[Contents](#toc)
# ### IP Explorer Mindmap
# The mindmap diagram below shows the hunting workflow, depending upon the type of IP address provided.
#
# ![IPExplorerMindMap](https://github.com/Azure/Azure-Sentinel-Notebooks/raw/master/images/nb_ipexplorer-mindmap.png)
# ---
# ### Notebook initialization
# The next cell:
# - Checks for the correct Python version
# - Checks versions and optionally installs required packages
# - Imports the required packages into the notebook
# - Sets a number of configuration options.
#
# This should complete without errors. If you encounter errors or warnings look at the following two notebooks:
# - [TroubleShootingNotebooks](https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/TroubleShootingNotebooks.ipynb)
# - [ConfiguringNotebookEnvironment](https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/ConfiguringNotebookEnvironment.ipynb)
#
# If you are running in the Azure Sentinel Notebooks environment (Azure Notebooks or Azure ML) you can run live versions of these notebooks:
# - [Run TroubleShootingNotebooks](./TroubleShootingNotebooks.ipynb)
# - [Run ConfiguringNotebookEnvironment](./ConfiguringNotebookEnvironment.ipynb)
#
# You may also need to do some additional configuration to successfully use functions such as Threat Intelligence service lookup and Geo IP lookup.
# There are more details about this in the `ConfiguringNotebookEnvironment` notebook and in these documents:
# - [msticpy configuration](https://msticpy.readthedocs.io/en/latest/getting_started/msticpyconfig.html)
# - [Threat intelligence provider configuration](https://msticpy.readthedocs.io/en/latest/data_acquisition/TIProviders.html#configuration-file)
#
# +
from pathlib import Path
import os
import sys
import importlib
import warnings
from IPython.display import display, HTML, Markdown
REQ_PYTHON_VER=(3, 6)
REQ_MSTICPY_VER=(0, 6, 0)
display(HTML("<h3>Starting Notebook setup...</h3>"))
if Path("./utils/nb_check.py").is_file():
from utils.nb_check import check_python_ver, check_mp_ver
check_python_ver(min_py_ver=REQ_PYTHON_VER)
try:
check_mp_ver(min_msticpy_ver=REQ_MSTICPY_VER)
except ImportError:
# !pip install --upgrade msticpy
if "msticpy" in sys.modules:
importlib.reload(sys.modules["msticpy"])
else:
import msticpy
check_mp_ver(REQ_MSTICPY_VER)
# If not using Azure Notebooks, install msticpy with
# # !pip install msticpy
from msticpy.nbtools import nbinit
extra_imports = [
"msticpy.nbtools.entityschema, IpAddress",
"msticpy.nbtools.entityschema, GeoLocation",
"msticpy.sectools.ip_utils, create_ip_record",
"msticpy.sectools.ip_utils, get_ip_type",
"msticpy.sectools.ip_utils, get_whois_info",
]
nbinit.init_notebook(
namespace=globals(),
extra_imports=extra_imports,
);
WIDGET_DEFAULTS = {
"layout": widgets.Layout(width="95%"),
"style": {"description_width": "initial"},
}
# -
# <a></a>[Contents](#toc)
# ### Get WorkspaceId and Authenticate to Log Analytics
# <details>
# <summary> <u>Details...</u></summary>
# If you are using user/device authentication, run the following cell.
# - Click the 'Copy code to clipboard and authenticate' button.
# - This will pop up an Azure Active Directory authentication dialog (in a new tab or browser window). The device code will have been copied to the clipboard.
# - Select the text box and paste (Ctrl-V/Cmd-V) the copied value.
# - You should then be redirected to a user authentication page where you should authenticate with a user account that has permission to query your Log Analytics workspace.
#
# Use the following syntax if you are authenticating using an Azure Active Directory AppId and Secret:
# ```
# # %kql loganalytics://tenant(aad_tenant).workspace(WORKSPACE_ID).clientid(client_id).clientsecret(client_secret)
# ```
# instead of
# ```
# # %kql loganalytics://code().workspace(WORKSPACE_ID)
# ```
#
# Note: you may occasionally see a JavaScript error displayed at the end of the authentication - you can safely ignore this.<br>
# On successful authentication you should see a ```popup schema``` button.
# To find your Workspace Id go to [Log Analytics](https://ms.portal.azure.com/#blade/HubsExtension/Resources/resourceType/Microsoft.OperationalInsights%2Fworkspaces). Look at the workspace properties to find the ID.
# </details>
#See if we have an Azure Sentinel Workspace defined in our config file, if not let the user specify Workspace and Tenant IDs
from msticpy.nbtools.wsconfig import WorkspaceConfig
ws_config = WorkspaceConfig()
try:
ws_id = ws_config['workspace_id']
ten_id = ws_config['tenant_id']
config = True
md("Workspace details collected from config file")
except KeyError:
    md(('Please go to your Log Analytics workspace, copy the workspace ID'
        ' and/or tenant ID and paste them here to enable connection to the workspace and querying of it.<br>'))
ws_id_wgt = nbwidgets.GetEnvironmentKey(env_var='WORKSPACE_ID',
prompt='Please enter your Log Analytics Workspace Id:', auto_display=True)
ten_id_wgt = nbwidgets.GetEnvironmentKey(env_var='TENANT_ID',
prompt='Please enter your Log Analytics Tenant Id:', auto_display=True)
config = False
# Authentication
qry_prov = QueryProvider(data_environment="LogAnalytics")
qry_prov.connect(connection_str=ws_config.code_connect_str)
table_index = qry_prov.schema_tables
# <a></a>[Contents](#toc)
# ## Enter the IP Address and query time window
#
# Type the IP address you want to search for and the time bounds over which search.
#
# You can specify the IP address value in the widget e.g. 192.168.1.1
ipaddr_text = widgets.Text(
description="Enter the IP Address to search for:", **WIDGET_DEFAULTS
)
display(ipaddr_text)
query_times = nbwidgets.QueryTime(units="day", max_before=20, before=5, max_after=7)
query_times.display()
# <a></a>[Contents](#toc)
# ## Determine IP Address Type
# +
# Set up function to allow easy reference to common parameters for queries throughout the notebook
def ipaddr_query_params():
return {
"start": query_times.start,
"end": query_times.end,
"ip_address": ipaddr_text.value.strip()
}
ipaddr_type = get_ip_type(ipaddr_query_params()['ip_address'])
md('Depending on the IP address origin, different sections of this notebook are applicable', styles=["bold", "large"])
md('Please follow either the Internal IP Address or the External IP Address section, based on the recommendation below', styles=["bold"])
#Get details from Heartbeat table for the given IP Address and Time Parameters
heartbeat_df = qry_prov.Heartbeat.get_info_by_ipaddress(**ipaddr_query_params())
# Set hostname retrieved from the Heartbeat table, if available
if not heartbeat_df.empty:
hostname = heartbeat_df["Computer"][0]
else:
hostname = ""
if not heartbeat_df.empty:
ipaddr_origin = "Internal"
md(f'IP Address type based on subnet: {ipaddr_type} & IP Address Owner based on available logs : {ipaddr_origin}', styles=["blue","bold"])
display(Markdown('#### Recommendation - Go to section [InternalIP](#goto_internalIP)'))
elif ipaddr_type=="Private" and heartbeat_df.empty:
ipaddr_origin = "Unknown"
md(f'IP Address type based on subnet: {ipaddr_type} & IP Address Owner based on available logs : {ipaddr_origin}', styles=["blue","bold"])
display(Markdown('#### Recommendation - Go to section [InternalIP](#goto_internalIP)'))
else:
ipaddr_origin = "External"
md(f'IP Address type based on subnet: {ipaddr_type} & IP Address Owner based on available logs : {ipaddr_origin}', styles=["blue","bold"])
display(Markdown('#### Recommendation - Go to section [ExternalIP](#goto_externalIP)'))
#Populate related IP addresses for the calculated hostname
az_net_df = pd.DataFrame()
if "AzureNetworkAnalytics_CL" in table_index:
aznet_query = f"""
AzureNetworkAnalytics_CL | where ResourceType == 'NetworkInterface'
| where SubType_s == "Topology"
| search \'{ipaddr_text.value}\'
| where TimeGenerated >= datetime({query_times.start})
| where TimeGenerated <= datetime({query_times.end})
| where VirtualMachine_s has '{hostname}'
| top 1 by TimeGenerated desc
| project PrivateIPAddresses = PrivateIPAddresses_s, PublicIPAddresses = PublicIPAddresses_s"""
az_net_df = qry_prov.exec_query(query=aznet_query)
# Create IP Entity record using available dataframes or input ip address if nothing present
if az_net_df.empty and heartbeat_df.empty:
ip_entity = IpAddress()
ip_entity['Address'] = ipaddr_query_params()['ip_address']
ip_entity['Type'] = 'ipaddress'
ip_entity['OSType'] = 'Unknown'
md('No Heartbeat Data and Network topology data found')
elif not heartbeat_df.empty:
if az_net_df.empty:
ip_entity = create_ip_record(
heartbeat_df=heartbeat_df)
else:
ip_entity = create_ip_record(
heartbeat_df=heartbeat_df, az_net_df=az_net_df)
#Display IP Entity
md("Displaying IP Entity", styles=["green","bold"])
print(ip_entity)
# -
# <a id='goto_externalIP'></a>
# ## External IP
# <a></a>[Contents](#toc)
# ### GeoIP Lookups for External IP Addresses
# msticpy geoip module to retrieve the geo location of public IP addresses
# To force a GeoIP lookup for an internal public IP, replace 'and' with 'or' in the if condition below
if ipaddr_type == "Public" and ipaddr_origin == "External" :
iplocation = GeoLiteLookup()
loc_results, ext_ip_entity = iplocation.lookup_ip(ip_address=ipaddr_query_params()['ip_address'])
md(
'Geo Location for the IP Address ::', styles=["bold","green"]
)
print(ext_ip_entity[0])
else:
md(f'Analysis section Not Applicable since IP address owner is {ipaddr_origin}', styles=["bold","red"])
# <a></a>[Contents](#toc)
# ### Whois Registrars for External IP Addresses
# ipwhois module to retrieve whois registrar for Public IP addresses
# To force a Whois lookup for an internal public IP, replace 'and' with 'or' in the if condition below
if ipaddr_type == "Public" and ipaddr_origin == "External" :
from ipwhois import IPWhois
whois = IPWhois(ipaddr_query_params()['ip_address'])
whois_result = whois.lookup_whois()
if whois_result:
md(f'Whois Registrar Info ::', styles=["bold","green"])
display(whois_result)
else:
md(
f'No whois records available', styles=["bold","orange"]
)
else:
md(f'Analysis section Not Applicable since IP address owner is {ipaddr_origin}', styles=["bold","red"])
# <a></a>[Contents](#toc)
# ### Opensource and Azure Sentinel ThreatIntel Lookups
# #### Configure your TI Provider settings
# If you have not used threat intelligence lookups before you will need to supply API keys for the
# TI Providers that you want to use. Please see the section on configuring [msticpyconfig.yaml](#msticpyconfig.yaml-configuration-File)
#
# Then reload provider settings:
# ```
# mylookup = TILookup()
# mylookup.reload_provider_settings()
# ```
# To force a Threat Intel lookup for an internal public IP, replace 'and' with 'or' in the if condition below
if ipaddr_type == "Public" and ipaddr_origin == "External" :
mylookup = TILookup()
mylookup.loaded_providers
resp = mylookup.lookup_ioc(observable=ipaddr_query_params()['ip_address'], ioc_type="ipv4")
md(f'ThreatIntel Lookup for IP ::', styles=["bold","green"])
display(mylookup.result_to_df(resp).T)
else:
md(f'Analysis section Not Applicable since IP address owner is {ipaddr_origin}', styles=["bold","red"])
# <a></a>[Contents](#toc)
# ### Passive DNS lookups for External IP Addresses
# To force a Passive DNS lookup for an internal public IP, replace 'and' with 'or' in the if condition below
if ipaddr_type == "Public" and ipaddr_origin == "External" :
# retrieve passive dns from TI Providers
pdns = mylookup.lookup_ioc(
observable=ipaddr_query_params()['ip_address'],
ioc_type="ipv4",
ioc_query_type="passivedns",
providers=["XForce"],
)
pdns_df = mylookup.result_to_df(pdns)
if not pdns_df.empty and pdns_df["RawResult"][0] and "RDNS" in pdns_df["RawResult"][0]:
pdnsdomains = pdns_df["RawResult"][0]["RDNS"]
md(
            f'Passive DNS domains for IP: {pdnsdomains}', styles=["bold", "green"]
)
display(mylookup.result_to_df(pdns).T)
else:
md(
'No passive domains found from the providers', styles=["bold","orange"]
)
else:
md(f'Analysis section Not Applicable since IP address owner is {ipaddr_origin}', styles=["bold","red"])
# <a id='goto_internalIP'></a>
# ## Internal IP Address
# <a></a>[Contents](#toc)
# ### Data Sources available to query related to IP
if ipaddr_origin in ["Internal","Unknown"]:
# KQL query for full text search of IP address and display all datatypes populated for the time period
datasource_status = """
search \'{ip_address}\' or \'{hostname}\'
| where TimeGenerated >= datetime({start}) and TimeGenerated <= datetime({end})
| summarize RowCount=count() by Table=$table
""".format(
**ipaddr_query_params(), hostname=hostname
)
datasource_status_df = qry_prov.exec_query(datasource_status)
    # Display the result as a matrix of datatypes available to query for the query period
if not datasource_status_df.empty:
available_datasets = datasource_status_df['Table'].values
md("Datasources available to query for IP ::", styles=["green","bold"])
display(datasource_status_df)
else:
md_warn("No datasources contain given IP address for the query period")
else:
md(f'Analysis section Not Applicable since IP address type is: {ipaddr_origin}', styles=["bold","red"])
# <a></a>[Contents](#toc)
# ### Check if IP is assigned to multiple hostnames
if ipaddr_origin == "Internal" or not datasource_status_df.empty:
# Get single event - try process creation
if ip_entity['OSType'] =='Windows':
if "SecurityEvent" not in available_datasets:
raise ValueError("No Windows event log data available in the workspace")
host_name = None
matching_hosts_df = qry_prov.WindowsSecurity.list_host_processes(
query_times, host_name=hostname, add_query_items="| distinct Computer"
)
elif ip_entity['OSType'] =='Linux':
if "Syslog" not in available_datasets:
raise ValueError("No Linux syslog data available in the workspace")
else:
linux_syslog_query = f""" Syslog | where TimeGenerated >= datetime({query_times.start}) | where TimeGenerated <= datetime({query_times.end}) | where HostIP == '{ipaddr_text.value}' | distinct Computer """
matching_hosts_df = qry_prov.exec_query(query=linux_syslog_query)
if len(matching_hosts_df) > 1:
print(f"Multiple matches for '{hostname}'. Please select a host from the list.")
choose_host = nbwidgets.SelectItem(
item_list=list(matching_hosts_df["Computer"].values),
description="Select the host.",
auto_display=True,
)
elif not matching_hosts_df.empty:
host_name = matching_hosts_df["Computer"].iloc[0]
print(f"Unique host found for IP: {hostname}")
elif datasource_status_df.empty:
md_warn("No datasources contain given IP address for the query period")
else:
md(f'Analysis section Not Applicable since IP address type is : {ipaddr_origin}', styles=["bold","red"])
# <a></a>[Contents](#toc)
# ### System Info
# Retrieving System info from internal table if IP address is not Public
if ipaddr_origin == "Internal" and not heartbeat_df.empty:
md(
'System Info retrieved from Heartbeat table ::', styles=["green","bold"]
)
display(heartbeat_df.T)
else:
md_warn(
'No records available in HeartBeat table'
)
# [Contents](#toc)
# ### ServiceMap - Get List of Services for Host
if ipaddr_origin == "Internal":
if "ServiceMapProcess_CL" not in available_datasets:
md_warn("ServiceMap data is not enabled")
md(
f"Enable ServiceMap Solution from Azure marketplce: <br>"
+"https://docs.microsoft.com/en-us/azure/azure-monitor/insights/service-map#enable-service-map",
styles=["bold"]
)
else:
servicemap_proc_query = """
ServiceMapProcess_CL
| where Computer == \'{hostname}\'
| where TimeGenerated >= datetime({start}) and TimeGenerated <= datetime({end})
| project Computer, Services_s, DisplayName_s, ExecutableName_s , ExecutablePath_s
""".format(
hostname=hostname, **ipaddr_query_params()
)
servicemap_proc_df = qry_prov.exec_query(servicemap_proc_query)
display(servicemap_proc_df)
else:
md(f'Analysis section Not Applicable since IP address type is {ipaddr_type}', styles=["bold","red"])
# ## Related Alerts
ra_query_times = nbwidgets.QueryTime(
units="day",
origin_time=query_times.origin_time,
max_before=28,
max_after=5,
before=5,
auto_display=True,
)
# ### Visualization - Timeline of Related Alerts
# +
#Provide hostname if present to the query
if hostname:
md(f"Searching for alerts related to {hostname}...")
related_alerts = qry_prov.SecurityAlert.list_related_alerts(
ra_query_times, host_name=hostname
)
else:
md(f"Searching for alerts related to ip address(es) {ipaddr_query_params()['ip_address']}")
related_alerts = qry_prov.SecurityAlert.list_alerts_for_ip(
ra_query_times, source_ip_list=ipaddr_query_params()['ip_address']
)
def print_related_alerts(alertDict, entityType, entityName):
if len(alertDict) > 0:
md(
f"Found {len(alertDict)} different alert types related to this {entityType} (`{entityName}`)",styles=["bold","orange"]
)
for (k, v) in alertDict.items():
print(f"- {k}, # Alerts: {v}")
else:
print(f"No alerts for {entityType} entity `{entityName}`")
if isinstance(related_alerts, pd.DataFrame) and not related_alerts.empty:
host_alert_items = (
related_alerts[["AlertName", "TimeGenerated"]]
.groupby("AlertName")
.TimeGenerated.agg("count")
.to_dict()
)
print_related_alerts(host_alert_items, "host", hostname)
nbdisplay.display_timeline(
data=related_alerts, title="Alerts", source_columns=["AlertName"], height=200
)
else:
md("No related alerts found.",styles=["bold","green"])
# -
# ### Browse List of Related Alerts
# Select an Alert to view details
# +
def disp_full_alert(alert):
global related_alert
related_alert = SecurityAlert(alert)
nbdisplay.display_alert(related_alert, show_entities=True)
recenter_wgt = widgets.Checkbox(
value=True,
description='Center subsequent query times round selected Alert?',
disabled=False,
**WIDGET_DEFAULTS
)
if related_alerts is not None and not related_alerts.empty:
related_alerts["CompromisedEntity"] = related_alerts["Computer"]
md("Click on alert to view details.", styles=["bold"])
display(recenter_wgt)
rel_alert_select = nbwidgets.SelectAlert(
alerts=related_alerts,
action=disp_full_alert,
)
rel_alert_select.display()
# -
# <a></a>[Contents](#toc)
# ## Related Hosts
# **Hypothesis:** An attacker has gained access to the host, compromised credentials for its accounts, and is moving laterally through the network to gain access to more hosts.
#
# This section lists hosts related to the IP address being investigated. If you wish to expand the scope of hunting and investigate each host in detail, it is recommended to use the **Host Explorer (Windows/Linux)** notebooks:
# - [Entity Explorer - Windows Host](https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/Entity%20Explorer%20-%20Windows%20Host.ipynb)
# - [Entity Explorer - Linux Host](https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/Entity%20Explorer%20-%20Linux%20Host.ipynb)
#
# If you are running in the Azure Sentinel Notebooks environment (Azure Notebooks or Azure ML) you can run live versions of these notebooks:
# - [Run Entity Explorer - Windows Host](./Entity%20Explorer%20-%20Windows%20Host.ipynb)
# - [Run Entity Explorer - Linux Host](./Entity%20Explorer%20-%20Linux%20Host.ipynb)
#
# #### __NOTE - the following sections are only relevant for Internal IP Addresses.__
# <a></a>[Contents](#toc)
# ### Visualization - Networkx Graph
import networkx as nx
if ipaddr_origin == "Internal":
    # Retrieve related hosts from the SecurityEvent table for Windows OS
if ip_entity['OSType'] =='Windows':
if "SecurityEvent" not in available_datasets:
raise ValueError("No Windows event log data available in the workspace")
else:
related_hosts = """
SecurityEvent
| where TimeGenerated >= datetime({start}) and TimeGenerated <= datetime({end})
| where IpAddress == \'{ip_address}\' or Computer == \'{hostname}\'
| summarize count() by Computer, IpAddress
""".format(
**ipaddr_query_params(), hostname=hostname
)
related_hosts_df = qry_prov.exec_query(related_hosts)
elif ip_entity['OSType'] =='Linux':
if "Syslog" not in available_datasets:
raise ValueError("No Linux syslog data available in the workspace")
else:
related_hosts_df = qry_prov.LinuxSyslog.list_logons_for_source_ip(invest_times, ip_address=ipaddr_query_params()['ip_address'],add_query_items='extend IpAddress = HostIP | summarize count() by Computer, IpAddress')
    # Display a static networkx graph. For an interactive graph, uncomment and run the next block of code.
plt.figure(10, figsize=(22, 14))
g = nx.from_pandas_edgelist(related_hosts_df, "IpAddress", "Computer")
md('Entity Relationship Graph - Related Hosts :: ',styles=["bold","green"])
nx.draw_circular(g, with_labels=True, size=40, font_size=12, font_color="blue")
# Uncomment the cells below to display an interactive graph using the Pyvis library; the Azure Notebooks free tier may not render the graph correctly.
# logonpyvis_graph = Network(notebook=True, height="750px", width="100%", bgcolor="#222222", font_color="white")
# # set the physics layout of the network
# logonpyvis_graph.barnes_hut()
# sources = related_hosts_df['Computer']
# targets = related_hosts_df['IpAddress']
# weights = related_hosts_df['count_']
# edge_data = zip(sources, targets, weights)
# for e in edge_data:
# src = e[0]
# dst = e[1]
# w = e[2]
# logonpyvis_graph.add_node(src, src, title=src)
# logonpyvis_graph.add_node(dst, dst, title=dst)
# logonpyvis_graph.add_edge(src, dst, value=w)
# neighbor_map = logonpyvis_graph.get_adj_list()
# # add neighbor data to node hover data
# for node in logonpyvis_graph.nodes:
# node["title"] += " Neighbors:<br>" + "<br>".join(neighbor_map[node["id"]])
# node["value"] = len(neighbor_map[node["id"]])
# logonpyvis_graph.show("hostlogonpyvis_graph.html")
else:
md(f'Analysis section Not Applicable since IP address owner is {ipaddr_origin}', styles=["bold","red"])
# <a></a>[Contents](#toc)
# ## Related Accounts
# **Hypothesis:** An attacker has gained access to the host, compromised credentials for the accounts on it, and is moving laterally through the network to gain access to more accounts.
#
# This section lists accounts related to the IP address being investigated. If you wish to expand the scope of hunting and investigate each account in detail, it is recommended to use the **Account Explorer** notebook:
# - [Entity Explorer - Account](https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/Entity%20Explorer%20-%20Account.ipynb)
#
# If you are running in the Azure Sentinel Notebooks environment (Azure Notebooks or Azure ML) you can run live versions of these notebooks:
# - [Run Entity Explorer - Account](./Entity%20Explorer%20-%20Account.ipynb)
# <a></a>[Contents](#toc)
# ### Visualization - Networkx Graph
if ipaddr_origin == "Internal":
    # Retrieve related accounts from the SecurityEvent table for Windows OS
if ip_entity['OSType'] =='Windows':
if "SecurityEvent" not in available_datasets:
raise ValueError("No Windows event log data available in the workspace")
else:
related_accounts = """
SecurityEvent
| where TimeGenerated >= datetime({start}) and TimeGenerated <= datetime({end})
| where IpAddress == \'{ip_address}\' or Computer == \'{hostname}\'
| summarize count() by Account, Computer
""".format(
**ipaddr_query_params(), hostname=hostname
)
related_accounts_df = qry_prov.exec_query(related_accounts)
elif ip_entity['OSType'] =='Linux':
if "Syslog" not in available_datasets:
raise ValueError("No Linux syslog data available in the workspace")
else:
related_accounts_df = qry_prov.LinuxSyslog.list_logons_for_source_ip(invest_times, ip_address=ipaddr_query_params()['ip_address'],add_query_items='extend Account = AccountName | summarize count() by Account, Computer')
    # Display a static networkx connected graph. For an interactive graph, uncomment and run the cells below.
plt.figure(10, figsize=(22, 14))
g = nx.from_pandas_edgelist(related_accounts_df, "Computer", "Account")
md('Entity Relationship Graph - Related Accounts :: ',styles=["bold","green"])
nx.draw_circular(g, with_labels=True, size=40, font_size=12, font_color="blue")
# Uncomment the cells below to display an interactive graph using the Pyvis library; the Azure Notebooks free tier may not render the graph correctly.
# acclogon_pyvisgraph = Network(notebook=True, height="750px", width="100%", bgcolor="#222222", font_color="white")
# # set the physics layout of the network
# acclogon_pyvisgraph.barnes_hut()
# sources = related_accounts_df['Computer']
# targets = related_accounts_df['Account']
# weights = related_accounts_df['count_']
# edge_data = zip(sources, targets, weights)
# for e in edge_data:
# src = e[0]
# dst = e[1]
# w = e[2]
# acclogon_pyvisgraph.add_node(src, src, title=src)
# acclogon_pyvisgraph.add_node(dst, dst, title=dst)
# acclogon_pyvisgraph.add_edge(src, dst, value=w)
# neighbor_map = acclogon_pyvisgraph.get_adj_list()
# # add neighbor data to node hover data
# for node in acclogon_pyvisgraph.nodes:
# node["title"] += " Neighbors:<br>" + "<br>".join(neighbor_map[node["id"]])
# node["value"] = len(neighbor_map[node["id"]]) # this value attrribute for the node affects node size
# acclogon_pyvisgraph.show("accountlogonpyvis_graph.html")
else:
md(f'Analysis section Not Applicable since IP address owner is {ipaddr_origin}', styles=["bold","red"])
# <a></a>[Contents](#toc)
# ## Logon Summary for Related Entities
# **Hypothesis:** By analyzing the logon activities of the related entities, we can identify changes in logon patterns and narrow the entities down to a few with suspicious logon patterns.
#
# This section provides various visualizations of logon attributes, such as:
# - Weekly Failed Logon trend
# - Logon Types
# - Logon Processes
#
# If you wish to expand the scope of hunting and investigate a specific host in detail, it is recommended to use the **Host Explorer (Windows/Linux)** notebooks:
#
# - [Entity Explorer - Windows Host](https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/Entity%20Explorer%20-%20Windows%20Host.ipynb)
# - [Entity Explorer - Linux Host](https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/Entity%20Explorer%20-%20Linux%20Host.ipynb)
#
# If you are running in the Azure Sentinel Notebooks environment (Azure Notebooks or Azure ML) you can run live versions of these notebooks:
# - [Run Entity Explorer - Windows Host](./Entity%20Explorer%20-%20Windows%20Host.ipynb)
# - [Run Entity Explorer - Linux Host](./Entity%20Explorer%20-%20Linux%20Host.ipynb)
# <a></a>[Contents](#toc)
# ### HeatMap for Weekly failed logons
if ipaddr_origin == "Internal":
    # Retrieve logon events from the SecurityEvent table for Windows OS
if ip_entity['OSType'] =='Windows':
if "SecurityEvent" not in available_datasets:
raise ValueError("No Windows event log data available in the workspace")
else:
failed_logons = """
SecurityEvent
| where EventID in (4624,4625) | where IpAddress == \'{ip_address}\' or Computer == \'{hostname}\'
| where TimeGenerated >= datetime({start}) and TimeGenerated <= datetime({end})
| extend DayofWeek = case(dayofweek(TimeGenerated) == time(1.00:00:00), "Monday",
dayofweek(TimeGenerated) == time(2.00:00:00), "Tuesday",
dayofweek(TimeGenerated) == time(3.00:00:00), "Wednesday",
dayofweek(TimeGenerated) == time(4.00:00:00), "Thursday",
dayofweek(TimeGenerated) == time(5.00:00:00), "Friday",
dayofweek(TimeGenerated) == time(6.00:00:00), "Saturday",
"Sunday")
| summarize LogonCount=count() by DayofWeek, HourOfDay=format_datetime(bin(TimeGenerated,1h),'HH:mm')
""".format(
**ipaddr_query_params(), hostname=hostname
)
failed_logons_df = qry_prov.exec_query(failed_logons)
elif ip_entity['OSType'] =='Linux':
if "Syslog" not in available_datasets:
raise ValueError("No Linux syslog data available in the workspace")
else:
            failed_logons_df = qry_prov.LinuxSyslog.user_logon(invest_times, account_name ='', add_query_items=f"""| where HostIP == '{ipaddr_text.value}' |extend Account = AccountName | extend DayofWeek = case(dayofweek(TimeGenerated) == time(1.00:00:00), "Monday", dayofweek(TimeGenerated) == time(2.00:00:00), "Tuesday",
dayofweek(TimeGenerated) == time(3.00:00:00), "Wednesday",
dayofweek(TimeGenerated) == time(4.00:00:00), "Thursday",
dayofweek(TimeGenerated) == time(5.00:00:00), "Friday",
dayofweek(TimeGenerated) == time(6.00:00:00), "Saturday", "Sunday") | summarize LogonCount=count() by DayofWeek, HourOfDay=format_datetime(bin(TimeGenerated,1h),'HH:mm')""")
    # Plot a heatmap using the seaborn library if there are logon events
if len(failed_logons_df) > 0:
df_pivot = (
failed_logons_df.reset_index()
.pivot_table(index="DayofWeek", columns="HourOfDay", values="LogonCount")
.fillna(0)
)
display(
Markdown(
f'### <span style="color:blue"> Heatmap - Weekly Failed Logon Trend :: </span>'
)
)
f, ax = plt.subplots(figsize=(16, 8))
hm1 = sns.heatmap(df_pivot, cmap="YlGnBu", ax=ax)
plt.xticks(rotation=45)
plt.yticks(rotation=30)
else:
linux_logons=qry_prov.LinuxSyslog.list_logons_for_source_ip(**ipaddr_query_params())
        failed_logons = linux_logons[linux_logons['LogonResult'] == 'Failure']
else:
md(f'Analysis section Not Applicable since IP address owner is {ipaddr_origin}', styles=["bold","red"])
# <a></a>[Contents](#toc)
# ### Host Logons Timeline
# +
# set the origin time to the time of our alert
try:
origin_time = (related_alert.TimeGenerated
if recenter_wgt.value
else query_times.origin_time)
except NameError:
origin_time = query_times.origin_time
logon_query_times = nbwidgets.QueryTime(
units="day",
origin_time=origin_time,
before=5,
after=1,
max_before=20,
max_after=20,
)
logon_query_times.display()
# -
if ipaddr_origin == "Internal":
host_logons = qry_prov.WindowsSecurity.list_host_logons(
logon_query_times, host_name=hostname
)
if host_logons is not None and not host_logons.empty:
display(Markdown("### Logon timeline."))
tooltip_cols = [
"TargetUserName",
"TargetDomainName",
"SubjectUserName",
"SubjectDomainName",
"LogonType",
"IpAddress",
]
nbdisplay.display_timeline(
data=host_logons,
group_by="TargetUserName",
source_columns=tooltip_cols,
legend="right", yaxis=True
)
display(Markdown("### Counts of logon events by logon type."))
display(Markdown("Min counts for each logon type highlighted."))
logon_by_type = (
host_logons[["Account", "LogonType", "EventID"]]
.astype({'LogonType': 'int32'})
.merge(right=pd.Series(data=nbdisplay._WIN_LOGON_TYPE_MAP, name="LogonTypeDesc"),
left_on="LogonType", right_index=True)
.drop(columns="LogonType")
.groupby(["Account", "LogonTypeDesc"])
.count()
.unstack()
.rename(columns={"EventID": "LogonCount"})
.fillna(0)
.style
.background_gradient(cmap="viridis", low=0.5, high=0)
.format("{0:0>3.0f}")
)
display(logon_by_type)
else:
display(Markdown("No logon events found for host."))
else:
md(f'Analysis section Not Applicable since IP address owner is {ipaddr_origin}', styles=["bold","red"])
# ### Failed Logons Timeline
if ipaddr_origin == "Internal":
failedLogons = qry_prov.WindowsSecurity.list_host_logon_failures(
logon_query_times, host_name=ip_entity.hostname
)
if failedLogons.empty:
print("No logon failures recorded for this host between ",
f" {logon_query_times.start} and {logon_query_times.end}"
)
else:
nbdisplay.display_timeline(
data=host_logons.query('TargetLogonId != "0x3e7"'),
overlay_data=failedLogons,
alert=related_alert,
title="Logons (blue=user-success, green=failed)",
source_columns=tooltip_cols,
height=200,
)
display(failedLogons
.astype({'LogonType': 'int32'})
.merge(right=pd.Series(data=nbdisplay._WIN_LOGON_TYPE_MAP, name="LogonTypeDesc"),
left_on="LogonType", right_index=True)
[['Account', 'EventID', 'TimeGenerated',
'Computer', 'SubjectUserName', 'SubjectDomainName',
'TargetUserName', 'TargetDomainName',
'LogonTypeDesc','IpAddress', 'WorkstationName'
]])
else:
md(f'Analysis section Not Applicable since IP address owner is {ipaddr_origin}', styles=["bold","red"])
# <a></a>[Contents](#toc)
# ## Network Connection Analysis
#
# **Hypothesis:** That an attacker is remotely communicating with the host in order to compromise the host or for outbound communication to C2 for data exfiltration purposes after compromising the host.
#
# This section provides an overview of network activity to and from the host during the hunting time frame; the purpose is to identify anomalous network traffic. If you wish to investigate a specific IP in detail, it is recommended to use another instance of this notebook for each IP address.
#
# > Note: this query can return a lot of data for active hosts
# > If your query times out, try reducing the time range, breaking the analysis
# > into chunks
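# A minimal sketch of what breaking the analysis into chunks could look like is shown below. It assumes the `qry_prov`, `host_ips` and `ip_q_times` variables defined later in this section, and that the msticpy query accepts explicit `start`/`end` parameters (as the other queries in this notebook do):
# ```
# import pandas as pd
# edges = pd.date_range(ip_q_times.start, ip_q_times.end, freq="6H")
# flow_chunks = [
#     qry_prov.Network.list_azure_network_flows_by_ip(
#         start=chunk_start, end=chunk_end, ip_address_list=host_ips
#     )
#     for chunk_start, chunk_end in zip(edges[:-1], edges[1:])
# ]
# az_net_comms_df = pd.concat(flow_chunks, ignore_index=True)
# ```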
# <a></a>[Contents](#toc)
# ### Network Check Communications with Other Hosts
ip_q_times = nbwidgets.QueryTime(
label="Set time bounds for network queries",
units="hour",
max_before=120,
before=5,
after=5,
max_after=60,
origin_time=logon_query_times.origin_time
)
ip_q_times.display()
# <a></a>[Contents](#toc)
# ### Query Flows by IP Address
if "AzureNetworkAnalytics_CL" not in available_datasets:
md_warn("No network flow data available.")
md("Please skip the remainder of this section and go to [Time-Series-Anomalies](#Outbound-Data-transfer-Time-Series-Anomalies)")
az_net_comms_df = None
else:
all_host_ips = (
ip_entity['private_ips'] + ip_entity['public_ips']
)
host_ips = [i.Address for i in all_host_ips]
az_net_comms_df = qry_prov.Network.list_azure_network_flows_by_ip(
ip_q_times, ip_address_list=host_ips
)
if isinstance(az_net_comms_df, pd.DataFrame) and not az_net_comms_df.empty:
az_net_comms_df['TotalAllowedFlows'] = az_net_comms_df['AllowedOutFlows'] + az_net_comms_df['AllowedInFlows']
nbdisplay.display_timeline(
data=az_net_comms_df,
group_by="L7Protocol",
title="Network Flows by Protocol",
time_column="FlowStartTime",
source_columns=["FlowType", "AllExtIPs", "L7Protocol", "FlowDirection"],
height=300,
legend="right",
yaxis=True
)
nbdisplay.display_timeline(
data=az_net_comms_df,
group_by="FlowDirection",
title="Network Flows by Direction",
time_column="FlowStartTime",
source_columns=["FlowType", "AllExtIPs", "L7Protocol", "FlowDirection"],
height=300,
legend="right",
yaxis=True
)
else:
md_warn("No network data for specified time range.")
md("Please skip the remainder of this section and go to [Time-Series-Anomalies](#Outbound-Data-transfer-Time-Series-Anomalies)")
try:
flow_plot = nbdisplay.display_timeline_values(
data=az_net_comms_df,
group_by="L7Protocol",
source_columns=["FlowType",
"AllExtIPs",
"L7Protocol",
"FlowDirection",
"TotalAllowedFlows"],
time_column="FlowStartTime",
y="TotalAllowedFlows",
legend="right",
height=500,
kind=["vbar", "circle"],
);
except NameError as err:
md(f"Error Occured, Make sure to execute previous cells in notebook: {err}",styles=["bold","red"])
try:
if az_net_comms_df is not None and not az_net_comms_df.empty:
cm = sns.light_palette("green", as_cmap=True)
cols = [
"VMName",
"VMIPAddress",
"PublicIPs",
"SrcIP",
"DestIP",
"L4Protocol",
"L7Protocol",
"DestPort",
"FlowDirection",
"AllExtIPs",
"TotalAllowedFlows",
]
flow_index = az_net_comms_df[cols].copy()
def get_source_ip(row):
if row.FlowDirection == "O":
return row.VMIPAddress if row.VMIPAddress else row.SrcIP
else:
return row.AllExtIPs if row.AllExtIPs else row.DestIP
def get_dest_ip(row):
if row.FlowDirection == "O":
return row.AllExtIPs if row.AllExtIPs else row.DestIP
else:
return row.VMIPAddress if row.VMIPAddress else row.SrcIP
flow_index["source"] = flow_index.apply(get_source_ip, axis=1)
flow_index["dest"] = flow_index.apply(get_dest_ip, axis=1)
display(flow_index)
# Uncomment to view flow_index results
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# display(
# flow_index[
# ["source", "dest", "L7Protocol", "FlowDirection", "TotalAllowedFlows"]
# ]
# .groupby(["source", "dest", "L7Protocol", "FlowDirection"])
# .sum()
# .reset_index()
# .style.bar(subset=["TotalAllowedFlows"], color="#d65f5f")
# )
except NameError as err:
md(f"Error Occured, Make sure to execute previous cells in notebook: {err}",styles=["bold","red"])
# <a></a>[Contents](#toc)
# ### Bulk whois lookup
# +
# Bulk WHOIS lookup function
from functools import lru_cache
from ipwhois import IPWhois
from ipaddress import ip_address
try:
    # Add ASN information from Whois
flows_df = (
flow_index[["source", "dest", "L7Protocol", "FlowDirection", "TotalAllowedFlows"]]
.groupby(["source", "dest", "L7Protocol", "FlowDirection"])
.sum()
.reset_index()
)
num_ips = len(flows_df["source"].unique()) + len(flows_df["dest"].unique())
print(f"Performing WhoIs lookups for {num_ips} IPs ", end="")
#flows_df = flows_df.assign(DestASN="", DestASNFull="", SourceASN="", SourceASNFull="")
flows_df["DestASN"] = flows_df.apply(lambda x: get_whois_info(x.dest, True), axis=1)
flows_df["SourceASN"] = flows_df.apply(lambda x: get_whois_info(x.source, True), axis=1)
print("done")
# Split the tuple returned by get_whois_info into separate columns
flows_df["DestASNFull"] = flows_df.apply(lambda x: x.DestASN[1], axis=1)
flows_df["DestASN"] = flows_df.apply(lambda x: x.DestASN[0], axis=1)
flows_df["SourceASNFull"] = flows_df.apply(lambda x: x.SourceASN[1], axis=1)
flows_df["SourceASN"] = flows_df.apply(lambda x: x.SourceASN[0], axis=1)
our_host_asns = [get_whois_info(ip.Address)[0] for ip in ip_entity.public_ips]
md(f"Host {ip_entity.hostname} ASNs:", "bold")
md(str(our_host_asns))
flow_sum_df = flows_df.groupby(["DestASN", "SourceASN"]).agg(
TotalAllowedFlows=pd.NamedAgg(column="TotalAllowedFlows", aggfunc="sum"),
L7Protocols=pd.NamedAgg(column="L7Protocol", aggfunc=lambda x: x.unique().tolist()),
source_ips=pd.NamedAgg(column="source", aggfunc=lambda x: x.unique().tolist()),
dest_ips=pd.NamedAgg(column="dest", aggfunc=lambda x: x.unique().tolist()),
).reset_index()
display(flow_sum_df)
except NameError as err:
md(f"Error Occured, Make sure to execute previous cells in notebook: {err}",styles=["bold","red"])
# -
# ### Choose ASNs/IPs to Check for Threat Intel Reports
# Move the ASNs whose IPs you wish to check into the `Selected` list.
# The `Source` list has been pre-populated with all ASNs found in the network flow summary.
#
# As an example, we've populated the `Selected` list with the ASNs that have the lowest number of flows to and from the host. We also remove the ASN that matches the ASN of the host we are investigating.
#
# Please edit this list, using the flow summary data above as a guide, leaving only the ASNs that you are suspicious about. Typically these would be ones with relatively low `TotalAllowedFlows` and possibly with unusual `L7Protocols`.
try:
if isinstance(flow_sum_df, pd.DataFrame) and not flow_sum_df.empty:
all_asns = list(flow_sum_df["DestASN"].unique()) + list(flow_sum_df["SourceASN"].unique())
all_asns = set(all_asns) - set(["private address"])
# Select the ASNs in the 25th percentile (lowest number of flows)
quant_25pc = flow_sum_df["TotalAllowedFlows"].quantile(q=[0.25]).iat[0]
quant_25pc_df = flow_sum_df[flow_sum_df["TotalAllowedFlows"] <= quant_25pc]
other_asns = list(quant_25pc_df["DestASN"].unique()) + list(quant_25pc_df["SourceASN"].unique())
other_asns = set(other_asns) - set(our_host_asns)
md("Choose IPs from Selected ASNs to look up for Threat Intel.", "bold")
sel_asn = nbwidgets.SelectSubset(source_items=all_asns, default_selected=other_asns)
except NameError as err:
md(f"Error Occured, Make sure to execute previous cells in notebook: {err}",styles=["bold","red"])
try:
if isinstance(flow_sum_df, pd.DataFrame) and not flow_sum_df.empty:
ti_lookup = TILookup()
from itertools import chain
dest_ips = set(chain.from_iterable(flow_sum_df[flow_sum_df["DestASN"].isin(sel_asn.selected_items)]["dest_ips"]))
src_ips = set(chain.from_iterable(flow_sum_df[flow_sum_df["SourceASN"].isin(sel_asn.selected_items)]["source_ips"]))
selected_ips = dest_ips | src_ips
print(f"{len(selected_ips)} unique IPs in selected ASNs")
# Add the IoCType to save cost of inferring each item
selected_ip_dict = {ip: "ipv4" for ip in selected_ips}
ti_results = ti_lookup.lookup_iocs(data=selected_ip_dict)
print(f"{len(ti_results)} results received.")
# ti_results_pos = ti_results[ti_results["Severity"] > 0]
#####
# WARNING - faking results for illustration purposes
#####
ti_results_pos = ti_results.sample(n=2)
print(f"{len(ti_results_pos)} positive results found.")
if not ti_results_pos.empty:
src_pos = flows_df.merge(ti_results_pos, left_on="source", right_on="Ioc")
dest_pos = flows_df.merge(ti_results_pos, left_on="dest", right_on="Ioc")
ti_ip_results = pd.concat([src_pos, dest_pos])
md_warn("Positive Threat Intel Results found for the following flows")
md("Please examine these IP flows using the IP Explorer notebook.", "bold, large")
display(ti_ip_results)
except NameError as err:
md(f"Error Occured, Make sure to execute previous cells in notebook: {err}",styles=["bold","red"])
# ### GeoIP Map of External IPs
# +
iplocation = GeoLiteLookup()
def format_ip_entity(row, ip_col):
ip_entity = entities.IpAddress(Address=row[ip_col])
iplocation.lookup_ip(ip_entity=ip_entity)
ip_entity.AdditionalData["protocol"] = row.L7Protocol
if "severity" in row:
ip_entity.AdditionalData["threat severity"] = row["severity"]
if "Details" in row:
ip_entity.AdditionalData["threat details"] = row["Details"]
return ip_entity
# from msticpy.nbtools.foliummap import FoliumMap
folium_map = FoliumMap()
if az_net_comms_df is None or az_net_comms_df.empty:
print("No network flow data available.")
else:
# Get the flow records for all flows not in the TI results
selected_out = flows_df[flows_df["DestASN"].isin(sel_asn.selected_items)]
selected_out = selected_out[~selected_out["dest"].isin(ti_ip_results["Ioc"])]
if selected_out.empty:
ips_out = []
else:
ips_out = list(selected_out.apply(lambda x: format_ip_entity(x, "dest"), axis=1))
selected_in = flows_df[flows_df["SourceASN"].isin(sel_asn.selected_items)]
selected_in = selected_in[~selected_in["source"].isin(ti_ip_results["Ioc"])]
if selected_in.empty:
ips_in = []
else:
ips_in = list(selected_in.apply(lambda x: format_ip_entity(x, "source"), axis=1))
ips_threats = list(ti_ip_results.apply(lambda x: format_ip_entity(x, "Ioc"), axis=1))
display(HTML("<h3>External IP Addresses communicating with host</h3>"))
display(HTML("Numbered circles indicate multiple items - click to expand"))
display(HTML("Location markers: <br>Blue = outbound, Purple = inbound, Green = Host, Red = Threats"))
icon_props = {"color": "green"}
for ips in ip_entity.public_ips:
ips.AdditionalData["host"] = ip_entity.hostname
folium_map.add_ip_cluster(ip_entities=ip_entity.public_ips, **icon_props)
icon_props = {"color": "blue"}
folium_map.add_ip_cluster(ip_entities=ips_out, **icon_props)
icon_props = {"color": "purple"}
folium_map.add_ip_cluster(ip_entities=ips_in, **icon_props)
icon_props = {"color": "red"}
folium_map.add_ip_cluster(ip_entities=ips_threats, **icon_props)
display(folium_map)
# -
# <a></a>[Contents](#toc)
# ### Outbound Data transfer Time Series Anomalies
# This section looks into the network data sources to check outbound data transfer trends.
# You can also use time-series analysis, with the built-in KQL query example below, to analyze anomalous data transfer trends. The example renders a sample dataset comparing actual vs baseline traffic trends.
# +
if "VMConnection" in table_index or "CommonSecurityLog" in table_index:
# KQL query for full text search of IP address and display all datatypes
dataxfer_stats = """
union isfuzzy=true
(
CommonSecurityLog
| where TimeGenerated >= datetime({start}) and TimeGenerated <= datetime({end})
| where isnotempty(DestinationIP) and isnotempty(SourceIP)
| where SourceIP == \'{ip_address}\'
| extend SentBytesinKB = (SentBytes / 1024), ReceivedBytesinKB = (ReceivedBytes / 1024)
| summarize DailyCount = count(), ListOfDestPorts = make_set(DestinationPort), TotalSentBytesinKB = sum(SentBytesinKB), TotalReceivedBytesinKB = sum(ReceivedBytesinKB) by SourceIP, DestinationIP, DeviceVendor, bin(TimeGenerated,1d)
| project DeviceVendor, TimeGenerated, SourceIP, DestinationIP, ListOfDestPorts, TotalSentBytesinKB, TotalReceivedBytesinKB
),
(
VMConnection
| where TimeGenerated >= datetime({start}) and TimeGenerated <= datetime({end})
| where isnotempty(DestinationIp) and isnotempty(SourceIp)
| where SourceIp == \'{ip_address}\'
| extend DeviceVendor = "VMConnection", SourceIP = SourceIp, DestinationIP = DestinationIp
| extend SentBytesinKB = (BytesSent / 1024), ReceivedBytesinKB = (BytesReceived / 1024)
| summarize DailyCount = count(), ListOfDestPorts = make_set(DestinationPort), TotalSentBytesinKB = sum(SentBytesinKB),TotalReceivedBytesinKB = sum(ReceivedBytesinKB) by SourceIP, DestinationIP, DeviceVendor, bin(TimeGenerated,1d)
| project DeviceVendor, TimeGenerated, SourceIP, DestinationIP, ListOfDestPorts, TotalSentBytesinKB, TotalReceivedBytesinKB
)
""".format(**ipaddr_query_params())
dataxfer_stats_df = qry_prov.exec_query(dataxfer_stats)
    # Display daily data transfer stats if any were found for the query period
if len(dataxfer_stats_df) > 0:
md(
'Data transfer daily stats for IP ::', styles=["bold","green"]
)
display(dataxfer_stats_df)
else:
md_warn(
f'No Data transfer logs found for the query period'
)
#####
# WARNING - faking results for illustration purposes
#####
md(
'Visualizing time series data transfer on dummy dataset for demonstration ::', styles=["bold","green"]
)
    # Generate a graph from a dummy dataset in a custom table representing outbound data transfer flow records
timechartquery = """
let TimeSeriesData = PaloAltoBytesSent_CL
| extend TimeGenerated = todatetime(EventTime_s), TotalBytesSent = todouble(TotalBytesSent_s)
| summarize TimeGenerated=make_list(TimeGenerated, 10000),TotalBytesSent=make_list(TotalBytesSent, 10000) by deviceVendor_s
| project TimeGenerated, TotalBytesSent;
TimeSeriesData
| extend (baseline,seasonal,trend,residual) = series_decompose(TotalBytesSent)
| mv-expand TotalBytesSent to typeof(double), TimeGenerated to typeof(datetime), baseline to typeof(long), seasonal to typeof(long), trend to typeof(long), residual to typeof(long)
| project TimeGenerated, TotalBytesSent, baseline
| render timechart with (title="Palo Alto Outbound Data Transfer Time Series decomposition")
"""
# %kql -query timechartquery
# -
# ## Conclusion
# ### List of Suspicious Activities/ Observables/Hunting bookmarks
# - Suspicious alerts for the IP
# - Anomalous failed logon trend on a few days at 04:00 AM
# - Anomalous spike in traffic logs over HTTP
# - Positive TI Hit from Open source feeds.
# - Unusual data transfer deviating from normal baseline.
# <a></a>[Contents](#toc)
# ## Appendices
# ## Available DataFrames
print('List of current DataFrames in Notebook')
print('-' * 50)
current_vars = list(locals().keys())
for var_name in current_vars:
if isinstance(locals()[var_name], pd.DataFrame) and not var_name.startswith('_'):
print(var_name)
# ### Saving Data to Excel
# To save the contents of a pandas DataFrame to an Excel spreadsheet
# use the following syntax
# ```
# writer = pd.ExcelWriter('myWorksheet.xlsx')
# my_data_frame.to_excel(writer,'Sheet1')
# writer.save()
# ```
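# With recent versions of pandas you can also let `ExcelWriter` act as a context manager, so the file is saved and closed automatically (this assumes an Excel engine such as `openpyxl` or `xlsxwriter` is installed):
# ```
# with pd.ExcelWriter('myWorksheet.xlsx') as writer:
#     my_data_frame.to_excel(writer, sheet_name='Sheet1')
# ```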
# ## Configuration
#
# ### `msticpyconfig.yaml` configuration File
# You can configure primary and secondary TI providers and any required parameters in the `msticpyconfig.yaml` file. This is read from the current directory or you can set an environment variable (`MSTICPYCONFIG`) pointing to its location.
#
# To configure this file see the [ConfigureNotebookEnvironment notebook](https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/ConfiguringNotebookEnvironment.ipynb)
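#
# As a minimal sketch (the path below is hypothetical), you can point msticpy at your configuration file and reload the TI provider settings without restarting the kernel:
# ```
# import os
# os.environ["MSTICPYCONFIG"] = "/path/to/msticpyconfig.yaml"
# mylookup = TILookup()
# mylookup.reload_provider_settings()
# mylookup.loaded_providers
# ```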
| Entity Explorer - IP Address.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from collections import namedtuple
num_rows = int(input())
column_names = input().split()
Student = namedtuple('Student', column_names)
marks_sum = 0
for _ in range(num_rows):
fields = input().split()
student = Student(*fields)
marks_sum += int(student.MARKS)
print("{:.2f}".format(marks_sum / num_rows))
# +
# in shorthand
from collections import namedtuple
n, Student = int(input()), namedtuple('Student', input())
print("{:.2f}".format(sum([int(Student(*input().split()).MARKS) for _ in range(n)]) / n))
| Python/7. collection/37. collections named tuple.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Example 5: Laplace equation
#
# In this tutorial we will look at constructing the steady-state heat example using the Laplace equation. In contrast to the previous tutorials, this example is entirely driven by the prescribed Dirichlet and Neumann boundary conditions, instead of an initial condition. We will also demonstrate how to use Devito to solve a steady-state problem without time derivatives and how to switch buffers explicitly without having to re-compile the kernel.
#
# First, we again define our governing equation:
# $$\frac{\partial ^2 p}{\partial x^2} + \frac{\partial ^2 p}{\partial y^2} = 0$$
#
# We are again discretizing second-order derivatives using a central difference scheme to construct a diffusion problem (see tutorial 3). This time we have no time-dependent term in our equation though, since there is no term $p_{i,j}^{n+1}$. This means that we are simply updating our field variable $p$ over and over again, until we have reached an equilibrium state. In a discretised form, after rearranging to update the central point $p_{i,j}^n$ we have
# $$p_{i,j}^n = \frac{\Delta y^2(p_{i+1,j}^n+p_{i-1,j}^n)+\Delta x^2(p_{i,j+1}^n + p_{i,j-1}^n)}{2(\Delta x^2 + \Delta y^2)}$$
#
# And, as always, we first re-create the original implementation to see what we are aiming for. Here we initialise the field $p$ to $0$ and apply the following bounday conditions:
#
# $p=0$ at $x=0$
#
# $p=y$ at $x=2$
#
# $\frac{\partial p}{\partial y}=0$ at $y=0, \ 1$
#
# **Developer note:**
# The original tutorial stores the field data in the layout `(ny, nx)`. Until now we have used `(x, y)` notation for creating our Devito examples, but for this one we will adopt the `(y, x)` layout for compatibility reasons.
# +
from examples.cfd import plot_field
import numpy as np
# %matplotlib inline
# Some variable declarations
nx = 31
ny = 31
c = 1
dx = 2. / (nx - 1)
dy = 1. / (ny - 1)
# -
def laplace2d(p, bc_right, dx, dy, l1norm_target):
l1norm = 1
pn = np.empty_like(p)
while l1norm > l1norm_target:
pn = p.copy()
p[1:-1, 1:-1] = ((dy**2 * (pn[1:-1, 2:] + pn[1:-1, 0:-2]) +
dx**2 * (pn[2:, 1:-1] + pn[0:-2, 1:-1])) /
(2 * (dx**2 + dy**2)))
p[:, 0] = 0 # p = 0 @ x = 0
p[:, -1] = bc_right # p = y @ x = 2
p[0, :] = p[1, :] # dp/dy = 0 @ y = 0
p[-1, :] = p[-2, :] # dp/dy = 0 @ y = 1
l1norm = (np.sum(np.abs(p[:]) - np.abs(pn[:])) /
np.sum(np.abs(pn[:])))
return p
# +
#NBVAL_IGNORE_OUTPUT
# Our initial condition is 0 everywhere, except at the boundary
p = np.zeros((ny, nx))
# Boundary conditions
bc_right = np.linspace(0, 1, ny)
p[:, 0] = 0 # p = 0 @ x = 0
p[:, -1] = bc_right # p = y @ x = 2
p[0, :] = p[1, :] # dp/dy = 0 @ y = 0
p[-1, :] = p[-2, :] # dp/dy = 0 @ y = 1
plot_field(p, ymax=1.0, view=(30, 225))
# +
#NBVAL_IGNORE_OUTPUT
p = laplace2d(p, bc_right, dx, dy, 1e-4)
plot_field(p, ymax=1.0, view=(30, 225))
# -
# Ok, nice. Now, to re-create this example in Devito we need to look a little bit further under the hood. There are two things that make this different to the examples we covered so far:
# * We have no time dependence in the `p` field, but we still need to advance the state of p in between buffers. So, instead of using `TimeFunction` objects that provide multiple data buffers for timestepping schemes, we will use `Function` objects that have no time dimension and only allocate a single buffer according to the space dimensions. However, since we are still implementing a pseudo-timestepping loop, we will need two objects, say `p` and `pn`, to act as alternating buffers.
# * If we're using two different symbols to denote our buffers, any operator we create will only perform a single timestep. This is desired though, since we need to check a convergence criteria outside of the main stencil update to determine when we stop iterating. As a result we will need to call the operator repeatedly after instantiating it outside the convergence loop.
#
# So, how do we make sure our operator doesn't accidentally overwrite values in the same buffer? Well, we can again let SymPy reorganise our Laplace equation based on `pn` to generate the stencil, but when we create the update expression, we set the LHS to our second buffer variable `p`.
# +
from devito import Grid, Function, Eq, INTERIOR, solve
# Create two explicit buffers for pseudo-timestepping
grid = Grid(shape=(nx, ny), extent=(1., 2.))
p = Function(name='p', grid=grid, space_order=2)
pn = Function(name='pn', grid=grid, space_order=2)
# Create the Laplace equation based on `pn`
eqn = Eq(pn.laplace, region=INTERIOR)
# Let SymPy solve for the central stencil point
stencil = solve(eqn, pn)
# Now we let our stencil populate our second buffer `p`
eq_stencil = Eq(p, stencil)
# In the resulting stencil `pn` is exclusively used on the RHS
# and `p` on the LHS is the grid the kernel will update
print("Update stencil:\n%s\n" % eq_stencil)
# -
# Now we can add our boundary conditions. We have already seen how to prescribe constant Dirichlet BCs by simply setting values using the low-level notation. This time we will go a little further by setting a prescribed profile, which we create first as a custom 1D symbol and supply with the BC values. For this we need to create a `Function` object that has a different shape than our general `grid`, so instead of the grid we provide an explicit pair of dimension symbols and the corresponding shape for the data.
x, y = grid.dimensions
bc_right = Function(name='bc_right', shape=(nx, ), dimensions=(x, ))
bc_right.data[:] = np.linspace(0, 1, nx)
# Now we can create a set of expressions for the BCs again, where we set prescribed values on the right and left of our grid. For the Neumann BCs along the top and bottom boundaries we simply copy the second row from the outside into the outermost row, just as the original tutorial did. Using these expressions and our stencil update we can now create an operator.
# +
#NBVAL_IGNORE_OUTPUT
from devito import Operator
# Create boundary condition expressions
bc = [Eq(p[x, 0], 0.)] # p = 0 @ x = 0
bc += [Eq(p[x, ny-1], bc_right[x])] # p = y @ x = 2
bc += [Eq(p[0, y], p[1, y])] # dp/dy = 0 @ y = 0
bc += [Eq(p[nx-1, y], p[nx-2, y])] # dp/dy = 0 @ y = 1
# Now we can build the operator that we need
op = Operator(expressions=[eq_stencil] + bc)
# -
# We can now use this single-step operator repeatedly in a Python loop, where we can arbitrarily execute other code in between invocations. This allows us to update our L1 norm and check for convergence. Using our pre-compiled operator now comes down to a single function call that supplies the relevant data symbols. One thing to note is that we now do exactly the same thing as the original NumPy loop, in that we deep-copy the data between each iteration of the loop, which we will look at after this.
# +
#NBVAL_IGNORE_OUTPUT
# Silence the runtime performance logging
from devito import configuration
configuration['log_level'] = 'ERROR'
# Initialise the two buffer fields
p.data[:] = 0.
p.data[:, -1] = np.linspace(0, 1, ny)
pn.data[:] = 0.
pn.data[:, -1] = np.linspace(0, 1, ny)
# Visualize the initial condition
plot_field(p.data, ymax=1.0, view=(30, 225))
# Run the convergence loop with deep data copies
l1norm_target = 1.e-4
l1norm = 1
while l1norm > l1norm_target:
# This call implies a deep data copy
pn.data[:] = p.data[:]
op(p=p, pn=pn)
l1norm = (np.sum(np.abs(p.data[:]) - np.abs(pn.data[:])) /
np.sum(np.abs(pn.data[:])))
# Visualize the converged steady-state
plot_field(p.data, ymax=1.0, view=(30, 225))
# -
# One crucial detail about the code above is that the deep data copy between iterations will really hurt performance if we were to run this on a large grid. However, we have already seen how we can match data symbols to symbolic names when calling the pre-compiled operator, which we can now use to actually switch the roles of `pn` and `p` between iterations, e.g. `op(p=pn, pn=p)`. Thus, we can implement a simple buffer-switching scheme by simply testing for odd and even time-steps, without ever having to shuffle data around.
# +
#NBVAL_IGNORE_OUTPUT
# Initialise the two buffer fields
p.data[:] = 0.
p.data[:, -1] = np.linspace(0, 1, ny)
pn.data[:] = 0.
pn.data[:, -1] = np.linspace(0, 1, ny)
# Visualize the initial condition
plot_field(p.data, ymax=1.0, view=(30, 225))
# Run the convergence loop by explicitly flipping buffers
l1norm_target = 1.e-4
l1norm = 1
counter = 0
while l1norm > l1norm_target:
# Determine buffer order
if counter % 2 == 0:
_p = p
_pn = pn
else:
_p = pn
_pn = p
# Apply operator
op(p=_p, pn=_pn)
# Compute L1 norm
l1norm = (np.sum(np.abs(_p.data[:]) - np.abs(_pn.data[:])) /
np.sum(np.abs(_pn.data[:])))
counter += 1
plot_field(p.data, ymax=1.0, view=(30, 225))
| examples/cfd/05_laplace.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **[Machine Learning Course Home Page](https://www.kaggle.com/learn/machine-learning)**
#
# ---
#
# This exercise will test your ability to read a data file and understand statistics about the data.
#
# In later exercises, you will apply techniques to filter the data, build a machine learning model, and iteratively improve your model.
#
# The course examples use data from Melbourne. To ensure you can apply these techniques on your own, you will have to apply them to a new dataset (with house prices from Iowa).
#
# The exercises use a "notebook" coding environment. In case you are unfamiliar with notebooks, we have a [90-second intro video](https://www.youtube.com/watch?v=4C2qMnaIKL4).
#
# # Exercises
#
# Run the following cell to set up code-checking, which will verify your work as you go.
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.machine_learning.ex2 import *
print("Setup Complete")
# ## Step 1: Loading Data
# Read the Iowa data file into a Pandas DataFrame called `home_data`.
# +
import pandas as pd
# Path of the file to read
iowa_file_path = '../input/home-data-for-ml-course/train.csv'
# Fill in the line below to read the file into a variable home_data
home_data = pd.read_csv(iowa_file_path)
# Call line below with no argument to check that you've loaded the data correctly
step_1.check()
# +
# Lines below will give you a hint or solution code
#step_1.hint()
#step_1.solution()
# -
# ## Step 2: Review The Data
# Use the command you learned to view summary statistics of the data. Then fill in variables to answer the following questions
# Print summary statistics in next line
home_data.describe()
# +
# What is the average lot size (rounded to nearest integer)?
avg_lot_size = 10517
# As of today, how old is the newest home (current year - the date in which it was built)
newest_home_age = 2021-2010
# Checks your answers
step_2.check()
# -
step_2.hint()
step_2.solution()
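# The two values above can also be derived from the data instead of being typed in by hand. A small sketch (it assumes the usual `LotArea` and `YearBuilt` columns of the Iowa data):
# +
import datetime
# Average lot size, rounded to the nearest integer
derived_avg_lot_size = round(home_data['LotArea'].mean())
# Age of the newest home relative to the current year
derived_newest_home_age = datetime.datetime.now().year - home_data['YearBuilt'].max()
print(derived_avg_lot_size, derived_newest_home_age)
# -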
# ## Think About Your Data
#
# The newest house in your data isn't that new. A few potential explanations for this:
# 1. They haven't built new houses where this data was collected.
# 1. The data was collected a long time ago. Houses built after the data publication wouldn't show up.
#
# If the reason is explanation #1 above, does that affect your trust in the model you build with this data? What about if it is reason #2?
#
# How could you dig into the data to see which explanation is more plausible?
#
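# One way to dig into this is to look at when the houses were built and when the recorded sales happened. A small sketch (it assumes the usual `YearBuilt` and `YrSold` columns of the Iowa data):
# +
# A hard cutoff in construction years combined with recent sale dates would point to explanation #1,
# while old sale dates would point to explanation #2 (the data itself is old).
print(home_data['YearBuilt'].describe())
print("Most recent sale year:", home_data['YrSold'].max())
# -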
# Check out this **[discussion thread](https://www.kaggle.com/learn-forum/60581)** to see what others think or to add your ideas.
#
# # Keep Going
#
# You are ready for **[Your First Machine Learning Model](https://www.kaggle.com/dansbecker/your-first-machine-learning-model).**
#
# ---
# **[Machine Learning Course Home Page](https://www.kaggle.com/learn/machine-learning)**
#
#
| Intro to Machine Learning/exercise-2-explore-your-data-preet-mehta.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_tensorflow_p36
# language: python
# name: conda_tensorflow_p36
# ---
'''Was trying to copy a tutorial but I got pretty stumped
changing the output to something categorical
https://www.tensorflow.org/tutorials/text/text_generation
'''
import tensorflow as tf
import numpy as np
import math
import random
tf.compat.v1.enable_eager_execution()
tf.__version__
tf.executing_eagerly()
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
# Read, then decode for py2 compat.
text = open(path_to_file, 'rb').read().decode(encoding='utf-8')
# length of text is the number of characters in it
print ('Length of text: {} characters'.format(len(text)))
# Take a look at the first 250 characters in text
print(text[:100])
# The unique characters in the file
vocab = sorted(set(text))
print ('{} unique characters'.format(len(vocab)))
# +
# Creating a mapping from unique characters to indices
char2idx = {u:i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
text_as_int = np.array([char2idx[c] for c in text])
# -
print('{')
for char,_ in zip(char2idx, range(5)):
print(' {:4s}: {:3d},'.format(repr(char), char2idx[char]))
print(' ...\n}')
# Show how the first 13 characters from the text are mapped to integers
print ('{} ---- characters mapped to int ---- > {}'.format(repr(text[:13]), text_as_int[:13]))
# +
# The maximum length sentence we want for a single input in characters
seq_length = 32
examples_per_epoch = len(text)//(seq_length+1)
# Create training examples / targets
char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
for i in char_dataset.take(5):
print(idx2char[i.numpy()])
# -
sequences = char_dataset.batch(seq_length+1, drop_remainder=True)
# +
def assign_random_n(chunk):
    input_text = chunk[:-1]
    # Draw a random class label in [0, 40) to match the 40-way output layer. A TF op is used
    # so a fresh label is sampled per element; plain random.randint would be frozen in at trace time.
    target_n = tf.random.uniform([], minval=0, maxval=40, dtype=tf.int32)
    return input_text, target_n
dataset = sequences.map(assign_random_n)
# -
for input_example, target_n in dataset.take(1):
print ('Input data: ', repr(''.join(idx2char[input_example.numpy()])))
print ('Target data:', repr(str(target_n.numpy())))
# +
# Batch size
# Buffer size to shuffle the dataset
# (TF data is designed to work with possibly infinite sequences,
# so it doesn't attempt to shuffle the entire sequence in memory. Instead,
# it maintains a buffer in which it shuffles elements).
BATCH_SIZE = 128
BUFFER_SIZE = 1000
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
dataset
# -
def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, 32]),
tf.keras.layers.LSTM(64),
# tf.keras.layers.Flatten(),
# tf.keras.layers.Dense(128),
tf.keras.layers.Dropout(0.8),
tf.keras.layers.Dense(40),
tf.keras.layers.Softmax(1)
])
return model
# +
# Length of the vocabulary in chars
vocab_size = len(vocab)
embedding_dim = 256
rnn_units = 20
model = build_model(
vocab_size = len(vocab),
embedding_dim= embedding_dim,
rnn_units=rnn_units,
batch_size=BATCH_SIZE)
# -
for input_example_batch, target_example_batch in dataset.take(1):
example_batch_predictions = model(input_example_batch)
    print(example_batch_predictions.shape, "# (batch_size, number_of_cats)")
model.summary()
row = dataset.take(1)
temp = model.predict(row)[0]
print(len(temp)), temp
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
EPOCHS=10
history = model.fit(dataset, epochs=EPOCHS)
| deepmath/deephol/train/B_Skeleton_Architectures/old/.ipynb_checkpoints/biderctional_1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Language Translation
# In this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence to sequence model on a dataset of English and French sentences that can translate new sentences from English to French.
# ## Get the Data
# Since translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus.
# +
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
# -
# ## Explore the Data
# Play around with view_sentence_range to view different parts of the data.
# +
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
# -
# ## Implement Preprocessing Function
# ### Text to Word Ids
# As you did with other RNNs, you must turn the text into numbers so the computer can understand it. In the function `text_to_ids()`, you'll turn `source_text` and `target_text` from words to ids. However, you need to add the `<EOS>` word id at the end of each sentence in `target_text`. This will help the neural network predict when the sentence should end.
#
# You can get the `<EOS>` word id by doing:
# ```python
# target_vocab_to_int['<EOS>']
# ```
# You can get other word ids using `source_vocab_to_int` and `target_vocab_to_int`.
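# As a toy illustration of what `text_to_ids()` should produce (the vocabularies below are made up for the example, not the real project vocabularies):
# +
toy_source_vocab = {'new': 0, 'jersey': 1, 'is': 2}
toy_target_vocab = {'<EOS>': 1, 'new': 4, 'jersey': 5, 'est': 6}
# 'new jersey is' -> [0, 1, 2]; the target sentence additionally gets <EOS> appended.
print([toy_source_vocab[w] for w in 'new jersey is'.split()])
print([toy_target_vocab[w] for w in 'new jersey est'.split()] + [toy_target_vocab['<EOS>']])
# -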
# +
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
"""
Convert source and target text to proper word ids
:param source_text: String that contains all the source text.
:param target_text: String that contains all the target text.
:param source_vocab_to_int: Dictionary to go from the source words to an id
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: A tuple of lists (source_id_text, target_id_text)
"""
source_id_text = [[source_vocab_to_int[k] for k in i.split()] for i in source_text.split('\n')]
target_id_text = []
for i in target_text.split('\n'):
tmp = [target_vocab_to_int[k] for k in i.split()]
tmp.append(target_vocab_to_int['<EOS>'])
target_id_text.append(tmp)
return source_id_text, target_id_text
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_text_to_ids(text_to_ids)
# -
# ### Preprocess all the data and save it
# Running the code cell below will preprocess all the data and save it to file.
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
# # Check Point
# This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
# +
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
import helper
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
# -
# ### Check the Version of TensorFlow and Access to GPU
# This will check to make sure you have the correct version of TensorFlow and access to a GPU
# +
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
from tensorflow.python.layers.core import Dense
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
# -
# ## Build the Neural Network
# You'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below:
# - `model_inputs`
# - `process_decoder_input`
# - `encoding_layer`
# - `decoding_layer_train`
# - `decoding_layer_infer`
# - `decoding_layer`
# - `seq2seq_model`
#
# ### Input
# Implement the `model_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:
#
# - Input text placeholder named "input" using the TF Placeholder name parameter with rank 2.
# - Targets placeholder with rank 2.
# - Learning rate placeholder with rank 0.
# - Keep probability placeholder named "keep_prob" using the TF Placeholder name parameter with rank 0.
# - Target sequence length placeholder named "target_sequence_length" with rank 1
# - Max target sequence length tensor named "max_target_len" getting its value from applying tf.reduce_max on the target_sequence_length placeholder. Rank 0.
# - Source sequence length placeholder named "source_sequence_length" with rank 1
#
# Return the placeholders in the following the tuple (input, targets, learning rate, keep probability, target sequence length, max target sequence length, source sequence length)
# +
def model_inputs():
"""
Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences.
:return: Tuple (input, targets, learning rate, keep probability, target sequence length,
max target sequence length, source sequence length)
"""
# TODO: Implement Function
_input = tf.placeholder(shape=(None, None), dtype = tf.int32, name="input")
_targets = tf.placeholder(tf.int32, [None, None])
_learning_rate = tf.placeholder(tf.float32)
_keep_prob = tf.placeholder(tf.float32, name="keep_prob")
_target_seq = tf.placeholder(tf.int32, [None], name="target_sequence_length")
_max_seq = tf.reduce_max(_target_seq, name="max_target_len")
_source_seq = tf.placeholder(tf.int32, [None], name="source_sequence_length")
return _input, _targets, _learning_rate, _keep_prob, _target_seq, _max_seq, _source_seq
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_model_inputs(model_inputs)
# -
# ### Process Decoder Input
# Implement `process_decoder_input` by removing the last word id from each batch in `target_data` and concatenating the GO ID to the beginning of each batch.
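# For example, here is a plain-Python sketch of the intended transformation on a toy batch, assuming the id of `<GO>` is 2: each row drops its last id and gets 2 prepended.
# +
GO_ID = 2  # assumed id for '<GO>' in this toy example
toy_batch = [[10, 11, 12, 3], [20, 21, 22, 3]]
print([[GO_ID] + row[:-1] for row in toy_batch])  # [[2, 10, 11, 12], [2, 20, 21, 22]]
# -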
# +
def process_decoder_input(target_data, target_vocab_to_int, batch_size):
"""
Preprocess target data for encoding
:param target_data: Target Placehoder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
:return: Preprocessed target data
"""
ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
return tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), ending], 1)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_process_encoding_input(process_decoder_input)
# -
# ### Encoding
# Implement `encoding_layer()` to create an Encoder RNN layer:
# * Embed the encoder input using [`tf.contrib.layers.embed_sequence`](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/embed_sequence)
# * Construct a [stacked](https://github.com/tensorflow/tensorflow/blob/6947f65a374ebf29e74bb71e36fd82760056d82c/tensorflow/docs_src/tutorials/recurrent.md#stacking-multiple-lstms) [`tf.contrib.rnn.LSTMCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/LSTMCell) wrapped in a [`tf.contrib.rnn.DropoutWrapper`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/DropoutWrapper)
# * Pass cell and embedded input to [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn)
# +
from imp import reload
reload(tests)
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob,
source_sequence_length, source_vocab_size,
encoding_embedding_size):
"""
Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:param source_sequence_length: a list of the lengths of each sequence in the batch
:param source_vocab_size: vocabulary size of source data
:param encoding_embedding_size: embedding size of source data
:return: tuple (RNN output, RNN state)
"""
embed = tf.contrib.layers.embed_sequence(rnn_inputs, source_vocab_size, encoding_embedding_size)
# make this an inner function.
def createNewCell(rnn_size):
cell = tf.contrib.rnn.LSTMCell(rnn_size)
return tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=keep_prob)
# Stack up multiple LSTM layers, for deep learning
cell = tf.contrib.rnn.MultiRNNCell([createNewCell(rnn_size) for _ in range(num_layers)])
return tf.nn.dynamic_rnn(cell, embed, sequence_length = source_sequence_length, dtype = tf.float32)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_encoding_layer(encoding_layer)
# -
# ### Decoding - Training
# Create a training decoding layer:
# * Create a [`tf.contrib.seq2seq.TrainingHelper`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/TrainingHelper)
# * Create a [`tf.contrib.seq2seq.BasicDecoder`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/BasicDecoder)
# * Obtain the decoder outputs from [`tf.contrib.seq2seq.dynamic_decode`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/dynamic_decode)
# +
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input,
target_sequence_length, max_summary_length,
output_layer, keep_prob):
"""
Create a decoding layer for training
:param encoder_state: Encoder State
:param dec_cell: Decoder RNN Cell
:param dec_embed_input: Decoder embedded input
:param target_sequence_length: The lengths of each sequence in the target batch
:param max_summary_length: The length of the longest sequence in the batch
:param output_layer: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: BasicDecoderOutput containing training logits and sample_id
"""
helper = tf.contrib.seq2seq.TrainingHelper(dec_embed_input, target_sequence_length, time_major=False)
decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, helper, encoder_state, output_layer=output_layer)
_outputs, _ = tf.contrib.seq2seq.dynamic_decode(decoder, impute_finished=True,
maximum_iterations=max_summary_length)
return _outputs
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_train(decoding_layer_train)
# -
# ### Decoding - Inference
# Create inference decoder:
# * Create a [`tf.contrib.seq2seq.GreedyEmbeddingHelper`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/GreedyEmbeddingHelper)
# * Create a [`tf.contrib.seq2seq.BasicDecoder`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/BasicDecoder)
# * Obtain the decoder outputs from [`tf.contrib.seq2seq.dynamic_decode`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/dynamic_decode)
# +
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,
end_of_sequence_id, max_target_sequence_length,
vocab_size, output_layer, batch_size, keep_prob):
"""
Create a decoding layer for inference
:param encoder_state: Encoder state
:param dec_cell: Decoder RNN Cell
:param dec_embeddings: Decoder embeddings
:param start_of_sequence_id: GO ID
:param end_of_sequence_id: EOS Id
:param max_target_sequence_length: Maximum length of target sequences
:param vocab_size: Size of decoder/target vocabulary
:param output_layer: Function to apply the output layer
:param batch_size: Batch size
:param keep_prob: Dropout keep probability
:return: BasicDecoderOutput containing inference logits and sample_id
"""
start_tokens = tf.tile(tf.constant([start_of_sequence_id], dtype=tf.int32), [batch_size])
helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings, start_tokens, end_of_sequence_id)
decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, helper, encoder_state, output_layer=output_layer)
_outputs, _ = tf.contrib.seq2seq.dynamic_decode(decoder, impute_finished=True,
maximum_iterations=max_target_sequence_length)
return _outputs
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_infer(decoding_layer_infer)
# -
# ### Build the Decoding Layer
# Implement `decoding_layer()` to create a Decoder RNN layer.
#
# * Embed the target sequences
# * Construct the decoder LSTM cell (just like you constructed the encoder cell above)
# * Create an output layer to map the outputs of the decoder to the elements of our vocabulary
# * Use your `decoding_layer_train(encoder_state, dec_cell, dec_embed_input, target_sequence_length, max_target_sequence_length, output_layer, keep_prob)` function to get the training logits.
# * Use your `decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, max_target_sequence_length, vocab_size, output_layer, batch_size, keep_prob)` function to get the inference logits.
#
# Note: You'll need to use [tf.variable_scope](https://www.tensorflow.org/api_docs/python/tf/variable_scope) to share variables between training and inference.
# +
def decoding_layer(dec_input, encoder_state,
target_sequence_length, max_target_sequence_length,
rnn_size,
num_layers, target_vocab_to_int, target_vocab_size,
batch_size, keep_prob, decoding_embedding_size):
"""
Create decoding layer
:param dec_input: Decoder input
:param encoder_state: Encoder state
:param target_sequence_length: The lengths of each sequence in the target batch
:param max_target_sequence_length: Maximum length of target sequences
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param target_vocab_size: Size of target vocabulary
:param batch_size: The size of the batch
:param keep_prob: Dropout keep probability
:return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
"""
# 1. Decoder Embedding
dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size]))
dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)
    # 2. Construct the decoder cell (again using an inner helper function)
def make_cell(input_size):
return tf.contrib.rnn.LSTMCell(input_size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
dec_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])
# 3. Dense layer to translate the decoder's output at each time
# step into a choice from the target vocabulary
output_layer = Dense(target_vocab_size,
kernel_initializer = tf.truncated_normal_initializer(mean = 0.0, stddev=0.1))
# 4. Set up a training decoder and an inference decoder
# Training Decoder
with tf.variable_scope("decode"):
decoder_output = decoding_layer_train(encoder_state, dec_cell, dec_embed_input, target_sequence_length,
max_target_sequence_length, output_layer, keep_prob)
# 5. Inference Decoder. Reuses the same parameters trained by the training process
with tf.variable_scope("decode", reuse=True):
infer_output = decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, target_vocab_to_int['<GO>'],
target_vocab_to_int['<EOS>'], max_target_sequence_length, target_vocab_size,
output_layer, batch_size, keep_prob)
return decoder_output, infer_output
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer(decoding_layer)
# -
# ### Build the Neural Network
# Apply the functions you implemented above to:
#
# - Encode the input using your `encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, encoding_embedding_size)`.
# - Process target data using your `process_decoder_input(target_data, target_vocab_to_int, batch_size)` function.
# - Decode the encoded input using your `decoding_layer(dec_input, enc_state, target_sequence_length, max_target_sentence_length, rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, dec_embedding_size)` function.
# +
def seq2seq_model(input_data, target_data, keep_prob, batch_size,
source_sequence_length, target_sequence_length,
max_target_sentence_length,
source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size,
rnn_size, num_layers, target_vocab_to_int):
"""
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder
:param target_data: Target placeholder
:param keep_prob: Dropout keep probability placeholder
:param batch_size: Batch Size
:param source_sequence_length: Sequence Lengths of source sequences in the batch
:param target_sequence_length: Sequence Lengths of target sequences in the batch
:param source_vocab_size: Source vocabulary size
:param target_vocab_size: Target vocabulary size
    :param enc_embedding_size: Encoder embedding size
    :param dec_embedding_size: Decoder embedding size
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
"""
_output, _state = encoding_layer(input_data, rnn_size, num_layers, keep_prob, source_sequence_length,
source_vocab_size, enc_embedding_size)
preprocess_target_data = process_decoder_input(target_data, target_vocab_to_int, batch_size)
#Apply embedding to the target data for the decoder.
return decoding_layer(preprocess_target_data, _state, target_sequence_length, max_target_sentence_length,
rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob,
dec_embedding_size)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_seq2seq_model(seq2seq_model)
# -
# ## Neural Network Training
# ### Hyperparameters
# Tune the following parameters:
#
# - Set `epochs` to the number of epochs.
# - Set `batch_size` to the batch size.
# - Set `rnn_size` to the size of the RNNs.
# - Set `num_layers` to the number of layers.
# - Set `encoding_embedding_size` to the size of the embedding for the encoder.
# - Set `decoding_embedding_size` to the size of the embedding for the decoder.
# - Set `learning_rate` to the learning rate.
# - Set `keep_probability` to the Dropout keep probability
# - Set `display_step` to state how many steps between each debug output statement
# Number of Epochs
epochs = 7
# Batch Size
batch_size = 256
# RNN Size
rnn_size = 128
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 100
decoding_embedding_size = 100
# Learning Rate
learning_rate = 0.003
# Dropout Keep Probability
keep_probability = 0.5
display_step = 250
# ### Build the Graph
# Build the graph using the neural network you implemented.
# +
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_target_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs()
#sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(tf.reverse(input_data, [-1]),
targets,
keep_prob,
batch_size,
source_sequence_length,
target_sequence_length,
max_target_sequence_length,
len(source_vocab_to_int),
len(target_vocab_to_int),
encoding_embedding_size,
decoding_embedding_size,
rnn_size,
num_layers,
target_vocab_to_int)
training_logits = tf.identity(train_logits.rnn_output, name='logits')
inference_logits = tf.identity(inference_logits.sample_id, name='predictions')
masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
training_logits,
targets,
masks)
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
# -
# Batch and pad the source and target sequences
# +
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
def pad_sentence_batch(sentence_batch, pad_int):
"""Pad sentences with <PAD> so that each sentence of a batch has the same length"""
max_sentence = max([len(sentence) for sentence in sentence_batch])
return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch]
def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int):
"""Batch targets, sources, and the lengths of their sentences together"""
for batch_i in range(0, len(sources)//batch_size):
start_i = batch_i * batch_size
# Slice the right amount for the batch
sources_batch = sources[start_i:start_i + batch_size]
targets_batch = targets[start_i:start_i + batch_size]
# Pad
pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))
pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))
# Need the lengths for the _lengths parameters
pad_targets_lengths = []
for target in pad_targets_batch:
pad_targets_lengths.append(len(target))
pad_source_lengths = []
for source in pad_sources_batch:
pad_source_lengths.append(len(source))
yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths
# -
# ### Train
# Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
# +
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
def get_accuracy(target, logits):
"""
Calculate accuracy
"""
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1])],
'constant')
return np.mean(np.equal(target, logits))
# Split data to training and validation sets
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = source_int_text[:batch_size]
valid_target = target_int_text[:batch_size]
(valid_sources_batch, valid_targets_batch, valid_sources_lengths, valid_targets_lengths ) = next(get_batches(valid_source,
valid_target,
batch_size,
source_vocab_to_int['<PAD>'],
target_vocab_to_int['<PAD>']))
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch, sources_lengths, targets_lengths) in enumerate(
get_batches(train_source, train_target, batch_size,
source_vocab_to_int['<PAD>'],
target_vocab_to_int['<PAD>'])):
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
target_sequence_length: targets_lengths,
source_sequence_length: sources_lengths,
keep_prob: keep_probability})
if batch_i % display_step == 0 and batch_i > 0:
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch,
source_sequence_length: sources_lengths,
target_sequence_length: targets_lengths,
keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_sources_batch,
source_sequence_length: valid_sources_lengths,
target_sequence_length: valid_targets_lengths,
keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(valid_targets_batch, batch_valid_logits)
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.4f}, Validation Accuracy: {:>6.4f}, Loss: {:>6.4f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
# -
# ### Save Parameters
# Save the `batch_size` and `save_path` parameters for inference.
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params(save_path)
# # Checkpoint
# +
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
# -
# ## Sentence to Sequence
# To feed a sentence into the model for translation, you first need to preprocess it. Implement the function `sentence_to_seq()` to preprocess new sentences.
#
# - Convert the sentence to lowercase
# - Convert words into ids using `vocab_to_int`
# - Convert words not in the vocabulary to the `<UNK>` word id.
# +
def sentence_to_seq(sentence, vocab_to_int):
"""
Convert a sentence to a sequence of ids
:param sentence: String
:param vocab_to_int: Dictionary to go from the words to an id
:return: List of word ids
"""
return [vocab_to_int[i.lower()] if i.lower() in vocab_to_int else vocab_to_int['<UNK>'] for i in sentence.split()]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_sentence_to_seq(sentence_to_seq)
# -
# ## Translate
# This will translate `translate_sentence` from English to French.
# +
translate_sentence = 'he saw a old yellow truck .'
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('predictions:0')
target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence]*batch_size,
target_sequence_length: [len(translate_sentence)*2]*batch_size,
source_sequence_length: [len(translate_sentence)]*batch_size,
keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in translate_logits]))
print(' French Words: {}'.format(" ".join([target_int_to_vocab[i] for i in translate_logits])))
# + active=""
# # Result from Google translate: Il a vu un vieux camion jaune.
# -
# # Imperfect Translation
# You might notice that some sentences translate better than others. Since the dataset you're using only has a vocabulary of 227 English words of the thousands that you use, you're only going to see good results using these words. For this project, you don't need a perfect translation. However, if you want to create a better translation model, you'll need better data.
#
# You can train on the [WMT10 French-English corpus](http://www.statmt.org/wmt10/training-giga-fren.tar). This dataset has a larger vocabulary and is richer in the topics discussed. However, it will take days to train, so make sure you have a GPU and that the neural network performs well on the dataset we provided. Just make sure you play with the WMT10 corpus after you've submitted this project.
# ## Submitting This Project
# When submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_language_translation.ipynb" and save it as a HTML file under "File" -> "Download as". Include the "helper.py" and "problem_unittests.py" files in your submission.
| language-translation/dlnd_language_translation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.2
# language: julia
# name: julia-1.6
# ---
# # Exercises: Control flow
# ### if...
#
# Write a conditional statement that prints the number itself if it is smaller than zero, and the string "positive" if the number is larger than or equal to zero.
#
# ### for-loops
#
# Loop over integers between 1 and 100 and print their squares.
# ### while
#
# Do the same with a `while` statement
# ### arrays
#
# Use an array comprehension to create an array that stores the squares of all integers between 1 and 100.
# ## Epidemic simulation
#
# Fill in the missing pieces to the second for loop below so that the infection spreads horizontally as well.
# The following two lines load the epidemic functions from a file
include("../epidemic_simple.jl")
cells = make_cells()
"Update the simulation one time step"
function update!(cells)
# Create a copy to remember the old state
old_cells = deepcopy(cells)
    # Loop over pairs of vertically adjacent cells (same column). There are size(cells)[1] rows, and size(cells)[1]-1 vertical pairs.
    for i in 1:size(cells)[1]-1
        # loop over all columns
        for j in 1:size(cells)[2]
            # The cells are (i,j) and (i+1,j). Each will interact with the other.
cells[i,j] = interact(cells[i,j], old_cells[i+1,j])
cells[i+1,j] = interact(cells[i+1,j], old_cells[i,j])
end
end
    # Loop over pairs of horizontally adjacent cells (same row). There are size(cells)[2] columns, and size(cells)[2]-1 horizontal pairs.
    for i in 1:size(cells)[1]
        # loop over the pairs of neighbouring columns
        for j in BLANK
            # The cells are (i,j) and (i,j+1). Each will interact with the other.
BLANK
BLANK
end
end
end
update!(cells)
cells
# ### Advanced: FizzBuzz
#
# Implement the (infamous) FizzBuzz test using Julia:
#
# Loop over numbers between 1 and 100. For every element:
# - given a number, N, print "Fizz" if N is divisible by 3,
# - "Buzz" if N is divisible by 5,
# - and "FizzBuzz" if N is divisible by 3 and 5.
# - Otherwise just print the number itself
#
# You can check the remainder of division using the `%` symbol, i.e., `3 % 2 = 1`
| exercises/04_Exercises-control-flow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [![Notebook Tutorial](__code/__all/notebook_tutorial.png)](https://neutronimaging.pages.ornl.gov/tutorial/notebooks/sequential_combine_images_using_metadata/#activate-search)
# <img src='__docs/__all/notebook_rules.png' />
# + [markdown] run_control={"frozen": false, "read_only": false}
# # Select Your IPTS
# + run_control={"frozen": false, "read_only": false}
from __code.sequential_combine_images_using_metadata import SequentialCombineImagesUsingMetadata
from __code import system
system.System.select_working_dir()
from __code.__all import custom_style
custom_style.style()
# + [markdown] run_control={"frozen": false, "read_only": false}
# # Select Folder containing all images to merge
# + run_control={"frozen": false, "read_only": false}
o_merge = SequentialCombineImagesUsingMetadata(working_dir=system.System.get_working_dir())
o_merge.select_folder()
# -
# # Select Metadata to match
# Only sequential runs having the **same metadata you are going to select** will be combined
o_merge.display_metadata_list()
# + [markdown] run_control={"frozen": false, "read_only": false}
# # Merging Method
# + run_control={"frozen": false, "read_only": false}
o_merge.how_to_combine()
# -
# # Create merging list - for checking purpose
o_merge.create_merging_list()
# Check merging list
o_merge.recap_merging_list()
# + [markdown] run_control={"frozen": false, "read_only": false}
# # Select Output Folder and Merge
# + run_control={"frozen": false, "read_only": false}
o_merge.select_output_folder_and_merge()
# -
| notebooks/sequential_combine_images_using_metadata.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Making multipanel plots with matplotlib
# First we import numpy and matplotlib as usual.
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# Then we define an array of angles and their sines and cosines using numpy. This time we will use linspace.
# +
x= np.linspace(0,2*np.pi,100)
print(x[-1],2*np.pi)
y=np.sin(x)
z=np.cos(x)
w=np.sin(4*x)
v=np.cos(4*x)
# -
# Now let's make a two-panel plot side-by-side.
# +
#call subplots to generate a multipanel figure. This means 1 row and 2 columns of figures
f, axarr = plt.subplots(1 , 2)
#treat axarr as an array, from left to right
#first panel
axarr[0].plot(x ,y)
axarr[0].set_xlabel('x')
axarr[0].set_ylabel('sin(x)')
axarr[0].set_title(r'$\sin(x)$')
#second panel
axarr[1].plot(x ,z)
axarr[1].set_xlabel('x')
axarr[1].set_ylabel('cos(x)')
axarr[1].set_title(r'$\cos(x)$')
#add more space between figures
f.subplots_adjust(wspace=0.4)
#fix the axis ratio
#here are 2 options
axarr[0].set_aspect('equal') #make the ratio of the tick units equal, a bit counter intuitive
axarr[1].set_aspect(np.pi) #make a square by setting aspect to be the ratio of the tick unit ranges
# +
#adjust size of figure
fig=plt.figure(figsize=(6,6))
plt.plot(x,y, label=r'$y=\sin(x)$')
plt.plot(x,z, label=r'$y=\cos(x)$')
plt.plot(x,w, label=r'$y=\sin(4x)$')
plt.plot(x,v, label=r'$y=\cos(4x)$')
plt.xlabel(r'$x$')
plt.ylabel(r'$y(x)$')
plt.xlim([0,2*np.pi])
plt.ylim([-1.2,1.2])
plt.legend(loc=1,framealpha=0.95)
plt.gca().set_aspect(np.pi/1.2)
# -
| Multipanel Figures.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Using a CNN model
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda
import torch.nn.functional as F
import matplotlib.pyplot as plt
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f'Using {device} device')
# +
# Following the same code as in training_a_model notebook, but using a CNN model
training_data = datasets.FashionMNIST(
root="../data",
train=True,
download=True,
transform=ToTensor()
)
test_data = datasets.FashionMNIST(
root="../data",
train=False,
download=True,
transform=ToTensor()
)
labels_map = {
0: "T-Shirt",
1: "Trouser",
2: "Pullover",
3: "Dress",
4: "Coat",
5: "Sandal",
6: "Shirt",
7: "Sneaker",
8: "Bag",
9: "Ankle Boot",
}
# +
# Look at a sample of the data
x, y = next(iter(training_data))
plt.figure()
plt.imshow(x.squeeze(), cmap="gray")
plt.title(labels_map[y])
plt.show()
# +
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# 1 input image channel, 6 output channels, 3x3 square convolution kernel
self.relu = nn.ReLU()
        # The output of a conv layer will be (W - K + 2P) / S + 1, where W is the
# image size, K is the kernel size, P is the padding size, and S is the stride.
# My input images are 1 x 28 x 28 and will be output as 6 x 30 x 30.
self.conv1 = nn.Conv2d(1, 6, kernel_size=3, stride=1, padding=2)
        # The output of a pooling layer will be (W - K) / S + 1, where K is the pooling kernel
        # and S is the stride. So the 6 x 30 x 30 input will be output as 6 x 15 x 15.
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Input: 6 x 15 x 15, Output: 16 x 13 x 13
self.conv2 = nn.Conv2d(6, 16, kernel_size=3, stride=1)
        # Input: 16 x 13 x 13, Output: 16 x 6 x 6 (pooling uses floor division)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
# Fully connected layers. All 16*6*6 inputs connect to 120 outputs, connecting the
# convolutional layers to the FC layers.
self.fc1 = nn.Linear(16 * 6 * 6, 120)
# 120 input nodes to 84 output nodes
self.fc2 = nn.Linear(120, 84)
# 84 input nodes to 10 output nodes: the number of labels
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
# Max pooling over a (2, 2) window
x = self.relu(self.conv1(x))
x = self.pool1(x)
        # Second convolution + pooling block
x = self.relu(self.conv2(x))
x = self.pool2(x)
x = x.view(-1, self.num_flat_features(x))
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
net = Net()
print(net)
# -
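# As a quick sanity check of the size arithmetic in the comments above (this cell is only an illustration and not part of the training pipeline), we can evaluate the conv/pool output formula and push a dummy 28x28 image through the untrained network to confirm the flattened size of 16 * 6 * 6 and the 10 class scores.
# +
def out_size(w, k, s=1, p=0):
    # (W - K + 2P) / S + 1, with the floor division used by Conv2d / MaxPool2d
    return (w - k + 2 * p) // s + 1

w = out_size(28, 3, p=2)   # conv1 -> 30
w = out_size(w, 2, s=2)    # pool1 -> 15
w = out_size(w, 3)         # conv2 -> 13
w = out_size(w, 2, s=2)    # pool2 -> 6
print(w)

dummy = torch.zeros(1, 1, 28, 28)
print(net(dummy).shape)  # expected: torch.Size([1, 10])
# -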
epochs = 10
batch_size = 64
learning_rate = 1e-3
momentum = 0.9
weight_decay = 0.0005
# +
# Initialize the dataloaders and model
train_dataloader = DataLoader(training_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)
model = Net()
model.to(device)
# -
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum, weight_decay=weight_decay)
# +
def train_loop(dataloader, model, loss_fn, optimizer):
size = len(dataloader.dataset)
# Get the input data X and label y from the dataloader
for batch, (X, y) in enumerate(dataloader):
# Compute the model prediction given current model parameters.
pred = model(X.to(device))
# Compute the loss from the prediction and the label
loss = loss_fn(pred, y.to(device))
        # Optimization: zero gradients, backpropagation, adjust parameters.
optimizer.zero_grad()
loss.backward()
optimizer.step()
def test_loop(dataloader, model, loss_fn):
size = len(dataloader.dataset)
test_loss, correct = 0, 0
# Turn off grad computation to reduce overhead of forward pass for testing.
with torch.no_grad():
for X, y in dataloader:
pred = model(X.to(device))
# Accumulate the total loss on the test data.
test_loss += loss_fn(pred, y.to(device)).item()
# Count the number of correct answers to calculate the accuracy.
correct += (pred.argmax(1) == y.to(device)).type(torch.float).sum().item()
    # Compute the average per-batch loss and the overall accuracy of the model.
    test_loss /= len(dataloader)
    correct /= size
print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
# -
for t in range(epochs):
print(f"Epoch {t+1}\n-------------------------------")
train_loop(train_dataloader, model, loss_fn, optimizer)
test_loop(test_dataloader, model, loss_fn)
print("Done!")
| notebooks/cnn_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ![Callysto.ca Banner](https://github.com/callysto/curriculum-notebooks/blob/master/callysto-notebook-banner-top.jpg?raw=true)
#
# <a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Mathematics/IndependentProbability/IndependentProbability.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# + tags=["hide-input"] language="html"
# <button onclick="run_all()">CLICK HERE TO BEGIN</button>
# <script>
# function run_all(){
# Jupyter.actions.call('jupyter-notebook:run-all-cells-below');
# Jupyter.actions.call('jupyter-notebook:save-notebook');
# }
# </script>
# + tags=["hide-input"] language="html"
#
# <script>
# function code_toggle() {
# if (code_shown){
# $('div.input').hide('500');
# $('#toggleButton').val('Show Code')
# } else {
# $('div.input').show('500');
# $('#toggleButton').val('Hide Code')
# }
# code_shown = !code_shown
# }
#
# $( document ).ready(function(){
# code_shown=false;
# $('div.input').hide()
# });
# </script>
# <p> Code is hidden for ease of viewing. Click the Show/Hide button to see. </>
# <form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form>
# -
# # Probability of Independent Events
#
# <img style="float: left;" src="Images/FirstDicePic.svg" width="700">
#
# ## Introduction
#
# Our lives are full of random events! To be an informed and successful person, it is important to get a "feel" for randomness. What is a random event? Flipping a coin, rolling dice, shuffling cards, and lottery draws are all examples of random events. There are **two basic types** of random events: **dependent** random events, and **independent** random events.
#
# What are **dependent** random events? Sometimes the occurrence of an event can affect the probability of the next event occurring. For example, consider removing a playing card from a deck of cards. As you take each card, there are fewer cards left in the deck, and so the probability of drawing a particular card changes on later draws.
#
# This notebook will focus on **independent** events. As you've probably guessed, these are events that are **not affected** by previous events.
#
# **Note:** To express the likelihood of an event occurring, we assign a **probability** to it, usually expressed as a decimal or fraction that is between zero and one.
# ### Die Simulator
#
# Click the "Roll Dice!" button as many times as you'd like. Do you think that the current number on the die affects the likelihood of the next number occurring?
#
# + tags=["hide-input"]
from ipywidgets import Output, VBox
from random import choice
import time
from IPython.display import Image, display, clear_output
from ipywidgets import widgets
Animation = Image(filename="Images/DiceAnimationInfinite.gif", width = 80, height = 80)
N1 = Image(filename="Images/Dice1.gif", width = 50, height = 50)
N2 = Image(filename="Images/Dice2.gif", width = 50, height = 50)
N3 = Image(filename="Images/Dice3.gif", width = 50, height = 50)
N4 = Image(filename="Images/Dice4.gif", width = 50, height = 50)
N5 = Image(filename="Images/Dice5.gif", width = 50, height = 50)
N6 = Image(filename="Images/Dice6.gif", width = 50, height = 50)
### Widgets ###
out1 = Output()
button1 = widgets.Button(description = "Roll Die!")
### Functions ###
def on_button1_clicked(b):
subj = [N1, N2, N3, N4, N5, N6]
with out1:
clear_output()
display(Animation)
time.sleep(2.5)
clear_output(wait=True)
display(choice(subj))
### Display Widgets ###
display(VBox([button1, out1]))
### Initialize Button Click Action ###
button1.on_click(on_button1_clicked)
# -
# ### Experimental Probability
#
# One way of estimating the probability of an event is to do so experimentally. If we do an experiment $x$ times and the event we want occurs $n$ times, then the **experimental probability** of the event is just:
#
# \begin{equation}
# \text{Probability of an event happening} = \dfrac{\text{Number of occurrences of event}}{\text{Number of experiments done}}=\dfrac{n}{x}
# \end{equation}
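# For example, the probability of rolling a 3 can be estimated with a short simulation (this code cell is just an illustration of the formula above):
# +
import random
x = 10000                                                    # number of experiments
n = sum(1 for _ in range(x) if random.randint(1, 6) == 3)    # occurrences of a 3
print(n / x)                                                 # should be close to 1/6 ≈ 0.1667
# -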
# #### Question
#
# Consider the event of rolling a particular number (say 3) on the above six-sided die. Roll the above die $x$ times (say 20 times) and count how many times ($n$) the particular number occurs.
#
# *What is your experimental estimate for the probability of rolling a given number on a six-sided die? Would you get the same estimate if you did it again?*
# ### Experimental Die Simulator
#
# Experimental estimates of probability improve if the number of times the experiment is done ($x$) is large. Let's demonstrate this through an example. The slider below allows you to choose how many times the die is rolled. The chart below keeps track of the value of each roll. Start at the bottom of the slider and make your way to the top. Do you see any pattern forming?
# + tags=["hide-input"]
from ipywidgets import interactive
import matplotlib.pyplot as plt
import numpy as np
import random
# + tags=["hide-input"]
def f(x=50):
min = 1
max = 6
D1, D2, D3, D4, D5, D6 = 0, 0, 0, 0, 0, 0
for i in range(x):
num1 = random.randint(min, max)
if num1 == 1:
D1 += 1
elif num1 == 2:
D2 += 1
elif num1 == 3:
D3 += 1
elif num1 == 4:
D4 += 1
elif num1 == 5:
D5 += 1
elif num1 == 6:
D6 += 1
plt.figure()
plt.bar([1,2,3,4,5,6],[D1,D2,D3,D4,D5,D6])
plt.ylim(0, (x/6)*2)
    plt.ylabel('Total Number of Occurrences ($n$)')
plt.xlabel('Die Value')
plt.show()
interactive_plot = interactive(f, x=widgets.IntSlider(min=50,max=5000,step=495, continuous_update=False))
output = interactive_plot.children[-1]
output.layout.height = '300px'
interactive_plot
# -
# ### Review Questions
#
# *What pattern emerges when the number of rolls increases? In other words, what happens to the graph when you move the slider to 5000 rolls?*
# + tags=["hide-input"]
def multiple_choice(O1, O2, O3, O4):
question_prompts = [O1, O2, O3, O4]
answer = question_prompts[0]
letters = ["a)", "b)", "c)", "d)"]
#Starts and ends bolded letters
start = "\033[1m"
end = "\033[0;0m"
#Randomly shuffles the options
random.shuffle(question_prompts)
#Prints the letters a) - d) in sequence with randomly chosen options
for i in range(4):
selection = question_prompts.pop()
print(start + letters[i] + end + selection)
#Stores the correct answer
if selection == answer:
letter_answer = letters[i]
button1 = widgets.Button(description="a)")
button2 = widgets.Button(description="b)")
button3 = widgets.Button(description="c)")
button4 = widgets.Button(description="d)")
button1.style.button_color = 'Whitesmoke'
button2.style.button_color = 'Whitesmoke'
button3.style.button_color = 'Whitesmoke'
button4.style.button_color = 'Whitesmoke'
container = widgets.HBox(children=[button1,button2,button3,button4])
display(container)
def on_button1_clicked(b):
if "a)" == letter_answer:
print("Correct! 👏", end='\r')
button1.style.button_color = '#abffa8'; button2.style.button_color = 'Whitesmoke'
button3.style.button_color = 'Whitesmoke'; button4.style.button_color = 'Whitesmoke'
else:
print("Try again! ", end='\r')
button1.style.button_color = '#ffbbb8'; button2.style.button_color = 'Whitesmoke'
button3.style.button_color = 'Whitesmoke'; button4.style.button_color = 'Whitesmoke'
def on_button2_clicked(b):
if "b)" == letter_answer:
print("Correct! 👏", end='\r')
button1.style.button_color = 'Whitesmoke'; button2.style.button_color = '#abffa8'
button3.style.button_color = 'Whitesmoke'; button4.style.button_color = 'Whitesmoke'
else:
print("Try again! ", end='\r')
button1.style.button_color = 'Whitesmoke'; button2.style.button_color = '#ffbbb8'
button3.style.button_color = 'Whitesmoke'; button4.style.button_color = 'Whitesmoke'
def on_button3_clicked(b):
if "c)" == letter_answer:
print("Correct! 👏", end='\r')
button1.style.button_color = 'Whitesmoke'; button2.style.button_color = 'Whitesmoke'
button3.style.button_color = '#abffa8'; button4.style.button_color = 'Whitesmoke'
else:
print("Try again! ", end='\r')
button1.style.button_color = 'Whitesmoke'; button2.style.button_color = 'Whitesmoke'
button3.style.button_color = '#ffbbb8'; button4.style.button_color = 'Whitesmoke'
def on_button4_clicked(b):
if "d)" == letter_answer:
print("Correct! 👏", end='\r')
button1.style.button_color = 'Whitesmoke'; button2.style.button_color = 'Whitesmoke'
button3.style.button_color = 'Whitesmoke'; button4.style.button_color = '#abffa8'
else:
print("Try again! ", end='\r')
button1.style.button_color = 'Whitesmoke'; button2.style.button_color = 'Whitesmoke'
button3.style.button_color = 'Whitesmoke'; button4.style.button_color = '#ffbbb8'
button1.on_click(on_button1_clicked)
button2.on_click(on_button2_clicked)
button3.on_click(on_button3_clicked)
button4.on_click(on_button4_clicked)
Option1 = "The total for each dice value becomes roughly the same as the number of rolls increases."
Option2 = "The graph will look like a staircase; i.e. 1 has the lowest total and 6 has the highest."
Option3 = "The graph will look like a hill; i.e. the middle values (3 and 4) will have the highest totals."
Option4 = "No pattern emerges."
multiple_choice(Option1, Option2, Option3, Option4)
# -
# *Read the number of occurrences ($n$) of a particular die value (say 3) and calculate the experimental probability as the number of experiments ($x$) increases. To what value does the experimental probability tend?*
# + tags=["hide-input"]
#import random
#Assign each question to these four variables. Note: there must be 4 questions and 1 answer.
Option1 = "Probability = 1/6" #Make this the answer. It'll be randomized later.
Option2 = "Probability = 1/2"
Option3 = "Probability = 1/3"
Option4 = "Probability = 1/4"
multiple_choice(Option1, Option2, Option3, Option4)
# -
# ### Experimental Coin Simulator
#
# Let's switch from dice to coins as a further example. A coin has two sides: "Heads" and "Tails". The following simulator counts the number of occurrences of each when a fair coin is flipped experimentally $x$ times.
#
# **Question:** *Similar to the dice bar chart above, what shape/pattern emerges if you toss a coin thousands of times?*
# + tags=["hide-input"]
def g(x=10):
min = 1
max = 2
Heads, Tails = 0,0
for i in range(x):
num1 = random.randint(min, max)
if num1 == 1:
Heads += 1
elif num1 == 2:
Tails += 1
plt.figure()
plt.bar(["Heads","Tails"],[Heads, Tails])
plt.ylim(0, (x/2)*2)
plt.ylabel('Total')
plt.xlabel('Coin Value')
plt.show()
interactive_plot = interactive(g, x=widgets.IntSlider(min=10,max=5000,step=499, continuous_update=False))
output = interactive_plot.children[-1]
output.layout.height = '300px'
interactive_plot
# + tags=["hide-input"]
Option1 = "The two columns become more and more similar in height."
Option2 = "No pattern!"
Option3 = "The 'Tails' column becomes twice as big as the 'Heads' column."
Option4 = "'Heads' is more likely and so its column becomes bigger."
multiple_choice(Option1, Option2, Option3, Option4)
# -
# **Question:** *Calculate the experimental probability of the event of getting a "Head" on a single coin toss using your data for a large number of experiments. What do you think the exact probability is?*
# ### Independence and the Gambler's Fallacy
#
# We have seen, based on experiment, that a fair coin has probability of $\frac{1}{2}$ of resulting in "Heads".
#
# **Question:** *Suppose you toss a fair coin and it comes up "Heads" **nine times** in a row. What is the chance that **the next toss** will also be a "Head"?*
# + tags=["hide-input"]
Option1 = "1/2 chance (50%)"
Option2 = "Nearly zero percent chance!"
Option3 = "1/6 chance"
Option4 = "1/4 chance (25%)"
multiple_choice(Option1, Option2, Option3, Option4)
# -
# **Note:** It is common for people to think that a Tail is "overdue", but the next toss of the coin is **totally independent** of any previous tosses.
#
# Saying "a Tail is due" is called **The Gambler's Fallacy**. Don't fall for it!
# ## Calculating the Probability of Independent Events Theoretically
#
# We have seen how to estimate the probability of an event experimentally. In many cases we can calculate an exact **theoretical probability** using the following formula:
#
# \begin{equation}
# \text{Probability of an event happening} = \dfrac{\text{Number of ways it can happen}}{\text{Total number of possible outcomes}}
# \end{equation}
#
# **Example:** What is the probability of getting a "Tail" when tossing a coin?
#
# - **Number of ways it can happen: 1** (Tail)
# - **Total number of possible outcomes: 2** (Head and Tail)
#
#
# Therefore, the probability of getting a "Tail" = $\dfrac{1}{2}=0.5$
#
# **Example:** What is the probability of getting a "3" or "6" when rolling a die?
#
# - **Number of ways it can happen: 2** ("3" and "6")
# - **Total number of possible outcomes: 6** ("1", "2", "3", "4", "5", "6")
#
#
# Therefore, the probability = $\dfrac{2}{6}=\dfrac{1}{3}=0.333...$
#
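# The same arithmetic can be double-checked in code (purely illustrative):
# +
from fractions import Fraction
print(Fraction(1, 2), float(Fraction(1, 2)))   # probability of a Tail
print(Fraction(2, 6), float(Fraction(2, 6)))   # probability of a "3" or "6"
# -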
# **Notes:**
# - Probabilities are often shown as a **decimal**, **fraction**, or a **percentage**. Here are **three** ways of showing the probability of flipping a "Head":
#
# - As a decimal: **0.5**
# - As a fraction: **1/2**
# - As a percentage: **50%**
#
#
# - The **theoretical probability formula** looks similar to the **experimental formula**, but both formulas are quite different. Using the theoretical formula, no experiment is done. We simply consider the possible outcomes and count those corresponding to the event of interest. Unlike an experimental probability, we will always get the **exact same answer** when we calculate the theoretical probability! *The theoretical probability is the true probability*. Experimental probabilities *approximate* the true probability. The approximations improve as the number of experiments ($x$) is increased.
# ### Review Questions
#
# *What is the probability of getting a "3" or "5" when rolling a die?*
# + tags=["hide-input"]
Option1 = "Probability = 1/3"
Option2 = "Probability = 1/6"
Option3 = "Probability = 1"
Option4 = "Probability = 3/6 or 5/6"
multiple_choice(Option1, Option2, Option3, Option4)
# -
# *What is the probability of getting a "1", "2", or "3" when rolling a die?*
# + tags=["hide-input"]
Option1 = "Probability = 3/6 or 1/2"
Option2 = "Probability = 1/6"
Option3 = "Probability = 1/3"
Option4 = "Probability = 0"
multiple_choice(Option1, Option2, Option3, Option4)
# -
# ## The Probability of Multiple Independent Events
#
# What if we wish to compute the probability of **multiple** independent events happening? For example, what is the probability of getting **three "Heads" in a row**? Or, equivalently, if we **flip three coins**, what is the probability of getting exactly **three "Heads"**?
#
# The following chart does the experiment of flipping three coins $x$ times and keeps track of the number of times ($n$) that three "Heads" occurs.
# + tags=["hide-input"]
repeated_heads = 3
def h(n):
sum1 = 0
for i in range(n):
num = random.randint(0,1)
sum1 += num
if sum1 == n:
return True
else:
return False
def a(x=1000):
good = 0
bad = 0
for j in range(x):
y = h(repeated_heads)
if y == True:
good += 1
else:
bad += 1
plt.figure()
plt.bar(["Total Tosses","3 Heads in a Row"],[bad+good, good])
plt.ylim(0, x + 10000)
plt.ylabel('Total number of occurrences ($n$)')
plt.xlabel('')
plt.show()
#print("After flipping a coin three times for " + str(x) + " iterations, the probability of getting three heads in a row can be approximated as: (Number of 3 Heads in a row) / (Total number of trials) = ", str(good) + "/" + str(good+bad) + " =", round(good/(good+bad), 5))
print("After flipping three coins " + str(x) + " times, the probability of getting three heads is experimentally:\n (Number of times 3 Heads occurs) / (Total number of trials) = n/x =", str(good) + "/" + str(good+bad) + " =", round(good/(good+bad), 5))
interactive_plot = interactive(a, x=widgets.IntSlider(min=1000,max=100000,step=9900, continuous_update=False))
output = interactive_plot.children[-1]
output.layout.height = '300px'
interactive_plot
# -
# ### Review Question
#
# *As you move the slider to the right, what theoretical probability do your experimental results approach?*
# + tags=["hide-input"]
Option1 = "0.125"
Option2 = "0.5"
Option3 = "1.5"
Option4 = "0.25"
multiple_choice(Option1, Option2, Option3, Option4)
# -
# ### Computing Multiple Independent Event Probabilities Theoretically
#
# **Question:** How do we quickly compute the theoretical probability of getting three Heads on a flip of three coins?
#
# **Answer:** We can calculate the probability of two or more **independent** events happening together by **multiplying** their individual probabilities. For each toss of a coin, a "Head" has a probability of 0.5, and so the probability of getting **three Heads in a row** is:
#
# \begin{equation}
# 0.5 \times 0.5 \times 0.5 = \boldsymbol{0.125}
# \end{equation}
#
# <img style="float: center;" src="Images/TREEFINAL.svg" width="50%">
#
# Another way of seeing this is to write out all of the possibilities. We call this the *sample space* of the random trial. In this example, the random trial is flipping three coins. Below is this sample space:
#
# <img style="float: center;" src="Images/SampleSpace.svg" width="50%">
#
# Observe that only **one** of the **eight** possibilities is "HHH". Hence, the probability of getting three Heads on a flip of three coins is **1/8 = 0.125**.
# **Notes:**
# - If you multiply probabilities in this way, you must express them in decimal form first, not as percents! If you want to express the final result as a percent, multiply it by 100 and attach a percent sign. Here, for example, 0.125 becomes 12.5%.
# - Computing multiple independent events theoretically in this manner (through multiplication) requires that each outcome is **equally likely**.
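#
# Both views (the multiplication rule and counting the sample space) can be checked with a short sketch using Python's built-in `itertools` module:

# +
from itertools import product

# Multiplication rule: P(H) x P(H) x P(H)
p_three_heads = 0.5 * 0.5 * 0.5
print("By multiplication:", p_three_heads)

# Sample space: every equally likely outcome of flipping three coins
sample_space = ["".join(outcome) for outcome in product("HT", repeat=3)]
print("Sample space:", sample_space)

# Count the outcomes that are exactly "HHH"
favourable = sample_space.count("HHH")
print("By counting:", favourable, "/", len(sample_space), "=", favourable / len(sample_space))
# -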
# ### Review Questions
#
# *What is the probability of 7 heads in a row? (Hint: use a calculator).*
# + tags=["hide-input"]
Option1 = "0.0078125"
Option2 = "0.03125"
Option3 = "0.015625"
Option4 = "0.5"
multiple_choice(Option1, Option2, Option3, Option4)
# -
# *Given that **we have just gotten 6 heads in a row**, what is the probability that **the next toss** is also a head? Or equivalently, suppose we **flip 7 coins** and find that the **first six are heads**; what is the probability that the last coin is a **head**?*
# + tags=["hide-input"]
Option1 = "0.5"
Option2 = "0.03125"
Option3 = "0.015625"
Option4 = "0.0078125"
multiple_choice(Option1, Option2, Option3, Option4)
# -
# ### A Cleaner Notation
#
# Typically, we use "P" to mean "Probability Of". For Independent Events, we have
#
# \begin{equation}
# \text{P(A and B) = P(A)} \times \text{P(B)}
# \end{equation}
#
# So we denote "the probability of getting 2 heads in a row" by
#
# \begin{equation}
# \text{P(Heads and Heads) = P(Heads)} \times \text{P(Heads) = 0.5} \times \text{0.5 = 0.25}.
# \end{equation}
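# This notation translates directly into code. Below is a tiny sketch (the helper name `p_and` is just for illustration) that multiplies the probabilities of any number of independent events:

# +
def p_and(*probabilities):
    """Probability that several independent events all happen."""
    result = 1.0
    for p in probabilities:
        result *= p
    return result

print("P(Heads and Heads) =", p_and(0.5, 0.5))        # 0.25
print("P(3 Heads in a row) =", p_and(0.5, 0.5, 0.5))  # 0.125
# -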
# ### Review Questions
#
# *How would you denote: "the probability of rolling a "1" twice in a row and then rolling a "5" on a die"? Or equivalently when rolling three dice that the first two are "1" and the last one is a "5"?*
# + tags=["hide-input"]
Option1 = "P('1' and '1' and '5')"
Option2 = "P('1' and '5')"
Option3 = "P('1')P('5')"
Option4 = "P('1' and '1' or '5')"
multiple_choice(Option1, Option2, Option3, Option4)
# -
# *For a coin, let H = "Heads" and T = "Tails". What is P(H and T and T and H)? Input your exact decimal answer in the textbox below.*
# + tags=["hide-input"]
text = widgets.Text(
value='',
placeholder='Type probability (decimal)',
description="Probability =",
disabled=False
)
button = widgets.Button(description="Hint")
hint = [widgets.Label("P(H and T and T and H) = P(H) \u00D7 P(T) \u00D7 P(T) \u00D7 P(H). Recall that P(H) = P(T)=0.5")]
display(text)
def on_button_clicked(b):
widgets.Box(hint)
#print("P(H and T and T and H) = P(H) \u00D7 P(T) \u00D7 P(T) \u00D7 P(H). Recall that P(H) = P(T)=0.5")
def callback(wdgt):
    # Check the submitted answer; the callback is registered once, below.
    if wdgt.value in ("0.0625", ".0625", "1/16"):
        print("Correct! 👏 ", end='\r')
    else:
        print("Try again! Hint: P(H and T and T and H) = P(H) \u00D7 P(T) \u00D7 P(T) \u00D7 P(H). Recall that P(H) = P(T) = 0.5", end='\r')
#display(button)
#button.on_click(on_button_clicked)
text.on_submit(callback)
# -
# ## Application Example
#
# Suppose we have two groups:
# - A member of each group gets randomly chosen for the winners' circle
# - Then, one of those gets randomly chosen to get the grand prize.
#
# **Question:** *What is your chance of winning the grand prize if:*
# - *there is a **1/5 chance** of going to the winners' circle, and*
# - *there is a **1/2** chance of winning the big prize?*
#
# <img style="float: center;" src="Images/Lottery.svg" width="55%">
# + tags=["hide-input"]
text2 = widgets.Text(
value='',
placeholder='Type probability (decimal)',
description="Probability =",
disabled=False
)
display(text2)
def callback2(wdgt):
if (wdgt.value == "0.1") or (wdgt.value == ".1") or (wdgt.value == "1/10"):
print("Correct! 👏 ", end='\r')
text2.on_submit(callback2)
else:
print("Try again! Hint: You have a 1/5 chance followed by a 1/2 chance of winning.", end='\r')
text2.on_submit(callback2)
text2.on_submit(callback2)
# -
# ## Conclusion
#
# - Experimental Probability = (Number of occurrences of event)/(Number of experiments done)
# - Theoretical Probability = (Number of ways event can happen)/(Total number of possible outcomes)
# - Dependent events (removing cards from a deck of cards) are affected by **previous events**
# - Independent events (such as rolling a die) are **not** affected by previous events
# - Calculate the probability of 2 or more **independent** events by **multiplying**
# [![Callysto.ca License](https://github.com/callysto/curriculum-notebooks/blob/master/callysto-notebook-banner-bottom.jpg?raw=true)](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| _build/jupyter_execute/curriculum-notebooks/Mathematics/IndependentProbability/independent-probability.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="m_3F2cWjR5oY" colab_type="text"
# # MNIST DCGAN Example
# + [markdown] id="dA2m6DVoIMPP" colab_type="text"
# Note: This notebook is designed to run with Python 3 and a GPU runtime.
#
# ![Python 3 and GPU runtime](https://raw.githubusercontent.com/enakai00/colab_tfbook/master/docs/imgs/runtime_gpu.png)
# + [markdown] id="53sGGhqt_00C" colab_type="text"
# This notebook uses TensorFlow 2.x.
# + id="u0PiOopl7MH0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a5eec980-4b59-414a-c142-6115766a4646"
# %tensorflow_version 2.x
# + [markdown] id="HqKgjio7IQCa" colab_type="text"
# ####[MDE-01]
# Import modules and set a random seed.
# + id="8uoZRr9eOmwG" colab_type="code" colab={}
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import layers, models, initializers
from tensorflow.keras.datasets import mnist
np.random.seed(20191019)
tf.random.set_seed(20191019)
# + [markdown] id="8NxTNnogIUeV" colab_type="text"
# ####[MDE-02]
# Download the MNIST dataset and store into NumPy arrays.
# + id="0ByKCdhESGpP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="e4859864-783a-4965-dc99-96ceac51ba5c"
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.reshape(
(len(train_images), 784)).astype('float32') / 255
test_images = test_images.reshape(
(len(test_images), 784)).astype('float32') / 255
train_labels = tf.keras.utils.to_categorical(train_labels, 10)
test_labels = tf.keras.utils.to_categorical(test_labels, 10)
# + [markdown] id="LyJSyjr9JA_G" colab_type="text"
# ####[MDE-03]
# Define a generator model.
# + id="qKvSYzl9SSkf" colab_type="code" outputId="f525df80-a126-4255-e9d0-d2a2d78d8b42" colab={"base_uri": "https://localhost:8080/", "height": 391}
latent_dim = 64
generator = models.Sequential()
generator.add(
layers.Dense(7*7*128, kernel_initializer=initializers.TruncatedNormal(),
input_shape=(latent_dim,), name='expand'))
generator.add(layers.LeakyReLU(name='leaky_relu1'))
generator.add(layers.Reshape((7, 7, 128), name='reshape'))
generator.add(
layers.Conv2DTranspose(64, 5, strides=2, padding='same',
kernel_initializer=initializers.TruncatedNormal(),
name='deconv1'))
generator.add(layers.LeakyReLU(name='leaky_relu2'))
generator.add(
layers.Conv2DTranspose(1, 5, strides=2, padding='same',
kernel_initializer=initializers.TruncatedNormal(),
activation='sigmoid', name='deconv2'))
generator.add(layers.Flatten(name='flatten'))
generator.summary()
# + [markdown] id="-3DbHA-0Jb5z" colab_type="text"
# ####[MDE-04]
# Define a discriminator model.
# + id="iyOLjN7kX1Px" colab_type="code" outputId="cd8a3ab3-268e-4f1f-fdbe-2f7ee1506813" colab={"base_uri": "https://localhost:8080/", "height": 425}
discriminator = models.Sequential()
discriminator.add(layers.Reshape((28, 28, 1), input_shape=((28*28,)),
name='reshape'))
discriminator.add(
layers.Conv2D(64, (5, 5), strides=2, padding='same',
kernel_initializer=initializers.TruncatedNormal(),
name='conv1'))
discriminator.add(layers.LeakyReLU(name='leaky_relu1'))
discriminator.add(
layers.Conv2D(128, (5, 5), strides=2, padding='same',
kernel_initializer=initializers.TruncatedNormal(),
name='conv2'))
discriminator.add(layers.LeakyReLU(name='leaky_relu2'))
discriminator.add(layers.Flatten(name='flatten'))
discriminator.add(layers.Dropout(rate=0.4, name='dropout'))
discriminator.add(layers.Dense(1, activation='sigmoid', name='sigmoid'))
discriminator.summary()
# + [markdown] id="9zzzeCD2J-9q" colab_type="text"
# ####[MDE-05]
# Compile the discriminator using the Adam optimizer and cross entropy as the loss function.
# + id="XEwdyyX5SeHg" colab_type="code" colab={}
discriminator.compile(optimizer='adam', loss='binary_crossentropy')
# + [markdown] id="is6hVDllKRAi" colab_type="text"
# ####[MDE-06]
# Define an end-to-end GAN model to train the generator.
# + id="VqLQnc3Gd_rR" colab_type="code" outputId="283ee2f0-e6e1-40ca-9dd1-8e0b1fbf7f44" colab={"base_uri": "https://localhost:8080/", "height": 255}
discriminator.trainable = False
gan_input = tf.keras.Input(shape=(latent_dim,))
gan_output = discriminator(generator(gan_input))
gan_model = models.Model(gan_input, gan_output)
gan_model.summary()
# + [markdown] id="PVMUs1WtKrQu" colab_type="text"
# ####[MDE-07]
# Compile the GAN model using the Adam optimizer and cross entropy as the loss function.
# + id="2A4dwLwTeywN" colab_type="code" colab={}
gan_model.compile(optimizer='adam', loss='binary_crossentropy')
# + [markdown] id="5FocFzOJK8ac" colab_type="text"
# ####[MDE-08]
# Define some working variables to trace the training process.
# + id="bK7cwT-umD1r" colab_type="code" colab={}
batch_size = 32
image_num = 0
step = 0
examples = []
sample_inputs = np.random.rand(8, latent_dim) * 2.0 - 1.0
examples.append(generator.predict(sample_inputs))
# + [markdown] id="zCGbB86tLNfK" colab_type="text"
# ####[MDE-09]
# Train the model for 40,000 batches.
# + id="33eI8EHqTHgo" colab_type="code" outputId="f13b168b-b917-400d-9a33-614e5a93ef11" colab={"base_uri": "https://localhost:8080/", "height": 187}
for _ in range(40000):
random_inputs = np.random.rand(batch_size, latent_dim) * 2.0 - 1.0
generated_images = generator.predict(random_inputs)
real_images = train_images[image_num : image_num+batch_size]
all_images = np.concatenate([generated_images, real_images])
labels = np.concatenate([np.zeros((batch_size, 1)),
np.ones((batch_size, 1))])
labels += 0.05 * np.random.random(labels.shape)
d_loss = discriminator.train_on_batch(all_images, labels)
random_inputs = np.random.rand(batch_size, latent_dim) * 2.0 - 1.0
fake_labels = np.ones((batch_size, 1))
g_loss = gan_model.train_on_batch(random_inputs, fake_labels)
image_num += batch_size
if image_num + batch_size > len(train_images):
image_num = 0
step += 1
if step % 4000 == 0:
print('step: {}, loss(discriminator, generator): {:6.4f}, {:6.4f}'.format(
step, d_loss, g_loss))
examples.append(generator.predict(sample_inputs))
# + [markdown] id="kIwtyrxVLrWZ" colab_type="text"
# ####[MDE-10]
# Show the progress of sample images.
# + id="yR5SvOxtSu1v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 755} outputId="7bf14fd8-db1c-4d4d-da26-9353d56fded6"
def show_images(examples):
fig = plt.figure(figsize=(10, 1.2*len(examples)))
c = 1
for images in examples:
for image in images:
subplot = fig.add_subplot(len(examples), 8, c)
subplot.set_xticks([])
subplot.set_yticks([])
subplot.imshow(image.reshape((28, 28)),
vmin=0, vmax=1, cmap=plt.cm.gray_r)
c += 1
show_images(examples)
# + [markdown] id="TBfRGalOL-kq" colab_type="text"
# ####[MDE-11]
# Mount your Google Drive on `/content/gdrive`.
# + id="qwe3Gb8xH-wW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c461aba8-6059-4996-cf52-c7135b2cf77a"
from google.colab import drive
drive.mount('/content/gdrive')
# + [markdown] id="l1HdULSEMDS9" colab_type="text"
# ####[MDE-12]
# Export the trained model as a file `gan_generator.hd5` on your Google Drive.
# + id="ShhsTXJtH3rg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="699cbeca-a29e-4bea-d732-cd5118ec575b"
generator.save('/content/gdrive/My Drive/gan_generator.hd5', save_format='h5')
# !ls -lh '/content/gdrive/My Drive/gan_generator.hd5'
| Chapter05/7. MNIST DCGAN example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/emanbuc/ultrasonic-vision/blob/main/data_visualizzation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Ev6kEE8HoC-J"
# # Univariate Plots
# Techniques that you can use to understand each attribute independently.
# + [markdown] id="QcXQHKTfn2zP"
# ## Histograms
# A fast way to get an idea of the distribution of each attribute is to look at histograms.
#
# Histograms group data into bins and provide you a count of the number of observations in each bin. From the shape of the bins you can quickly get a feeling for whether an attribute is Gaussian, skewed, or even exponentially distributed. It can also help you see possible outliers.
# + colab={"base_uri": "https://localhost:8080/", "height": 519} id="IuTL72XokA4J" outputId="72a3f672-d20c-4734-d166-614d24e5627d"
# Univariate Histograms
import matplotlib.pyplot as plt
import pandas
url = "https://raw.githubusercontent.com/emanbuc/ultrasonic-vision/main/sample_acquisitions/7sensors/20210102/20210102_alldata.csv"
names = ['HCSR04_001', 'HCSR04_002', 'HCSR04_003', 'HCSR04_004', 'HCSR04_005', 'HCSR04_006', 'HCSR04_007']
data = pandas.read_csv(url, usecols=names)
print(data)
data.hist()
plt.show()
# + [markdown] id="fXG87Jhxnt0Q"
# ### Density Plots
# Density plots are another way of getting a quick idea of the distribution of each attribute. The plots look like an abstracted histogram with a smooth curve drawn through the top of each bin, much like your eye tried to do with the histograms.
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="_2EDOu-FnqHb" outputId="2bf92f88-3045-4dc5-a155-4c4874c05e8b"
data.plot(kind='density', subplots=True, layout=(3,3), sharex=False)
plt.show()
# + [markdown] id="4wcPoOzoDQdN"
# The distance estimated by HCSR04_006 and HCSR04_007 is wrong: the real distance was between 20 and 70 cm.
# + [markdown] id="atbfneCqpAhP"
# ### Box and Whisker Plots
# Another useful way to review the distribution of each attribute is to use Box and Whisker Plots or boxplots for short.
#
# Boxplots summarize the distribution of each attribute, drawing a line for the median (middle value) and a box around the 25th and 75th percentiles (the middle 50% of the data). The whiskers give an idea of the spread of the data and dots outside of the whiskers show candidate outlier values (values that are 1.5 times greater than the size of spread of the middle 50% of the data).
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="jNOhbNOlo5Jn" outputId="dadcf7c8-2a7f-46c5-9698-206cdb586143"
data.plot(kind='box', subplots=True, layout=(2,4), sharex=False, sharey=False)
plt.show()
# + [markdown] id="qcKDS370pz7m"
# # Multivariate Plots
# Examples of plots with interactions between multiple variables.
# + [markdown] id="ddITQAWEp5q1"
# ## Correlation Matrix Plot
# Correlation gives an indication of how related the changes are between two variables. If two variables change in the same direction they are positively correlated. If they change in opposite directions (one goes up, one goes down), then they are negatively correlated.
#
# You can calculate the correlation between each pair of attributes. This is called a correlation matrix. You can then plot the correlation matrix and get an idea of which variables have a high correlation with each other.
#
# This is useful to know, because some machine learning algorithms like linear and logistic regression can have poor performance if there are highly correlated input variables in your data.
# + colab={"base_uri": "https://localhost:8080/", "height": 272} id="HpJaKUP3p4cl" outputId="8cafd58d-38a7-4e79-bbfb-8ed3fad023f1"
# Correlation Matrix Plot
import matplotlib.pyplot as plt
import pandas
import numpy
correlations = data.corr()
# plot correlation matrix
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(correlations, vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = numpy.arange(0, len(names), 1)  # one tick per sensor column
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(names)
ax.set_yticklabels(names)
plt.show()
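# + [markdown]
# As a follow-up to the plot above, here is a short sketch that lists the sensor pairs whose absolute correlation exceeds a chosen threshold (0.8 here, picked arbitrarily for illustration):

# +
# Print the most strongly correlated sensor pairs from the matrix computed above
threshold = 0.8
abs_corr = correlations.abs()
for i, col_a in enumerate(names):
    for col_b in names[i + 1:]:
        if abs_corr.loc[col_a, col_b] > threshold:
            print(col_a, col_b, round(abs_corr.loc[col_a, col_b], 3))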
# + [markdown] id="hBRQD2g9qz_c"
# ### Scatterplot Matrix
# A scatterplot shows the relationship between two variables as dots in two dimensions, one axis for each attribute. You can create a scatterplot for each pair of attributes in your data. Drawing all these scatterplots together is called a scatterplot matrix.
#
# Scatter plots are useful for spotting structured relationships between variables, like whether you could summarize the relationship between two variables with a line. Attributes with structured relationships may also be correlated and good candidates for removal from your dataset.
# + colab={"base_uri": "https://localhost:8080/", "height": 307} id="6RBG2yBorBa7" outputId="61397f5d-5037-4558-9498-bf7294f822ac"
# Scatterplot Matrix
import matplotlib.pyplot as plt
import pandas
from pandas.plotting import scatter_matrix
scatter_matrix(data)
plt.show()
# + [markdown] id="DVU4nzjzsjDT"
# # New Features
# + colab={"base_uri": "https://localhost:8080/"} id="NSrG52NOsiwj" outputId="a4a8874c-fb1b-4725-8f96-636948a6fd1e"
url = "https://raw.githubusercontent.com/emanbuc/ultrasonic-vision/main/sample_acquisitions/7sensors/20210102/20210102_alldata.csv"
names = ['HCSR04_001', 'HCSR04_002', 'HCSR04_003', 'HCSR04_004', 'HCSR04_005', 'HCSR04_006', 'HCSR04_007',"ObjectClass"]
data = pandas.read_csv(url, usecols=names)
# sum of the distances from the lower sensors mounted on the vertical panels
data['distanceSumHi'] = data.HCSR04_001 + data.HCSR04_002
# sum of the distances from the upper sensors mounted on the horizontal panels
data['distanceSumLow'] = data.HCSR04_003 + data.HCSR04_004
# difference of the distances from the roof sensors
data['differentialDistanceFromRoof65'] = data.HCSR04_006 - data.HCSR04_005
data['differentialDistanceFromRoof67'] = data.HCSR04_006 - data.HCSR04_007
data['differentialDistanceFromRoof57'] = data.HCSR04_005 - data.HCSR04_007
print(data)
# + [markdown] id="wO_kaOncJ_-V"
# # Training data analysis for the various object classes
# + colab={"base_uri": "https://localhost:8080/", "height": 472} id="0qW2u98_tx76" outputId="409e7686-7929-499d-cb95-e043d933e534"
groupedByClass = data.groupby(['ObjectClass'])
groupedByClass.first()
# + [markdown] id="zB5rmUT-PHft"
# # Removing anomalous readings
# + [markdown] id="tgZIVZ4rYB3f"
# The distances measured when no objects are present are the maximum values I expect. Higher values indicate a measurement anomaly (e.g. multipath, scattering, ...).
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="QACZuA8TXs1V" outputId="e6bb0f84-2625-4f7b-ad8e-76e1fb8ba4a3"
df_EMPTY_SEVEN = groupedByClass.get_group('EMPTY_SEVEN')
df_EMPTY_SEVEN.plot(kind='density', subplots=True, layout=(2,6), sharex=False, figsize=(30,15))
plt.show()
df_EMPTY_SEVEN
# + [markdown] id="L64MUTanZ8Hx"
# The measurement from sensor 007 shows a systematic error that shifts the peak from about 50 cm to about 100 cm.
#
# The real distance is about 50 cm.
# + id="ogs9b9ypPOQe"
cleanedData= data[(data["distanceSumLow"] <= 200) & (data["HCSR04_006"] <= 100) & (data["HCSR04_005"] <= 100)]
# + colab={"base_uri": "https://localhost:8080/", "height": 606} id="6XwCcjBBLmfS" outputId="3458a7cb-6283-47cf-c991-30941f9e0711"
newdf = cleanedData.query('ObjectClass == "SQUARE_MILK_45" | ObjectClass == "SQUARE_MILK_90" | ObjectClass == "SOAP_BOTTLE_FRONT" | ObjectClass == "SOAP_BOTTLE_SIDE" | ObjectClass == "BEAN_CAN" | ObjectClass=="RECTANGULAR_BOX" | ObjectClass=="RECTANGULAR_BOX_SIDE" | ObjectClass=="GLASS" | ObjectClass=="EMPTY_SEVEN"')
fig, ax = plt.subplots()
colors = {'SQUARE_MILK_45':'red', 'SQUARE_MILK_90':'blue', 'SOAP_BOTTLE_SIDE':'green', 'SOAP_BOTTLE_FRONT':'black','BEAN_CAN':'yellow','RECTANGULAR_BOX':'pink', 'RECTANGULAR_BOX_SIDE': 'orange','GLASS':'brown',"EMPTY_SEVEN":'grey'}
groupedByClass = newdf.groupby(['ObjectClass'])
for key, group in groupedByClass:
group.plot(ax=ax, kind='scatter', x='distanceSumLow', y='distanceSumHi', label=key, legend=True, color=colors[key],figsize=(20,10))
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 606} id="WZaqeGeLUl8x" outputId="4dae3bd3-16d9-4ac9-e498-ae514a533470"
fig, ax = plt.subplots()
for key, group in groupedByClass:
group.plot(ax=ax, kind='scatter', x='differentialDistanceFromRoof65', y='differentialDistanceFromRoof67', label=key, legend=True, color=colors[key],figsize=(20,10))
plt.show()
| notebooks/.ipynb_checkpoints/data_visualizzation-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="wdx6dxSuzXlc"
# # Spotify Song Suggestor
#
# 1. our model takes the spotify track_id
# `track_id = '11d9oUiwHuYt216EFA2tiz' # ice ice baby`
#
# 2. recommend 10 songs based off of the user's initial song
# ```
# ['11d9oUiwHuYt216EFA2tiz',
# '1mDZ1L4UA9zBq2tzFxH31a',
# '1pcBtixsF2z5AzvLKx7INI',
# '<KEY>',
# '4EV73dmNsiz8QnJnX1gmkr',
# '1LytkZ67Tquo5A5TyzqVcZ',
# '5xXqBoctoRdzHWiCxo1bIA',
# '5anKv2QxkjZPNTvaztpHw4',
# '49CdYBpfABUa0ZfT8FizQZ',
# '3rVm6G4AfyRXYI4wm0Bm5I']
# ```
#
# + [markdown] id="pVKP5KQ-y-0K"
# ## Dataset Roles and Rubric
#
#
# Spotify Dataset 1921-2020, 160k+ Tracks
# Audio features of 160k+ songs released in between 1921 and 2020
# Dataset
#
# https://www.kaggle.com/yamaerenay/spotify-dataset-19212020-160k-tracks
#
#
#
# Roles For Team
# https://www.notion.so/9c604209b5f14e8490d393aa71278ff2?v=7dfc7c90a9054edaa4f698afb9043fec
#
# Machine Learning Engineer
# https://www.notion.so/b29e86c10a09432787880eff6e8914b9?v=c5857f6d5ca24e0785884c7e78e6b176
#
#
#
#
# + id="ztjAKCwEQ8Dt"
# !pip install pandas-profiling==2.*
import pandas as pd
from google.colab import output # to clear pip output to screen
output.clear()
# + id="emGaw67OaJkt"
# !wget -O song_zip.zip https://github.com/JeffreyAsuncion/project_datasets/blob/main/song_zip.zip?raw=true
output.clear()
# + id="GIisBXjqJr0D"
import zipfile
with zipfile.ZipFile("song_zip.zip", "r") as zip_ref:
zip_ref.extractall("targetdir")
# + colab={"base_uri": "https://localhost:8080/", "height": 444} id="Ll-zdhqsJr2o" outputId="ae090630-c9fb-4c27-a2b1-2c12ca04d883"
data = pd.read_csv("/content/targetdir/data.csv")
print(data.shape)
data.head()
# + [markdown] id="0gkyv31Xmg3x"
# ## DATA DIVE
#
# 1. info() to look for column_names, type, non-null
# 2. describe() to look at the measures of central tendency, mean, quartiles
# 3. isnull()
# 4. categorical variables, num of unique
#
# + colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["88fcfd084a7b4b02a99113c1d20fabdf", "83a0f341207f4554a71147cc5ee9396c", "ba82e86a0c824880a465bc3e542ac8ba", "95a00bd1144c4bec82233890cabe3cfc", "d0fae6903fc14829a7f1d2386de6ad73", "<KEY>", "<KEY>", "0901382fe638435d857dd295ef43a47a", "97e0620c87ad4542b84a7a19ea01b782", "bb92f4d963ff495f9fcafd8f59983dd2", "05f9beff38a24c1ea33e18e66bf3fb86", "30a4f276609349f883e73dacc1a779c2", "fc5b8844085b45d98b95f07afd593dc5", "eb6d8df8ebc54e768ffd80b5f889d033", "0ec0eeb2084a4b48b37df8c0c35da5fa", "a300ce68bab2465784302f1bdebcd7fd", "<KEY>", "81c445fd5adc4851b45e5014c55e76f5", "d96ebfa0b84e4722af11a315d4030514", "4147d58c67a84595baee5a1fe41d8de0", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "011b49048074453ba61f0a468647eac9", "<KEY>", "8fa89c96dd824a1ea5b00433b0ac24aa", "ed1ce4e4caed47908397b05da4f74c1c", "b9b4e140106742f49f5163787a8029bf", "<KEY>", "699a500baba143a5ae6f4efc4e30a43e", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "f80aaef5e71544e3b7258adcd9a21b57", "2e537eea8947438a96e86695a1a70747", "<KEY>"]} id="fIDasn6G_Qpk" outputId="d7819283-3ccb-4176-cc5c-de4d7b821023"
from pandas_profiling import ProfileReport
profile = ProfileReport(data)
profile.to_file(output_file='fullsongs.html')
profile.to_notebook_iframe()
# + colab={"base_uri": "https://localhost:8080/", "height": 490} id="eYGYPmS0mkO9" outputId="17d5ed20-8f3f-4471-f0f6-522687ab4f6b"
data.info()
# + [markdown] id="a0sY93IDnKL7"
# NUMERICAL FEATURES
# + colab={"base_uri": "https://localhost:8080/", "height": 498} id="PkhXWIMYmmyb" outputId="2d65f3c1-2dc1-4fba-a059-edebeed01285"
data.describe().T
# + colab={"base_uri": "https://localhost:8080/", "height": 381} id="2bjIqT3Wmvuq" outputId="1438b67e-e5a2-4023-ce26-82f51d95517d"
data.isnull().sum()
# + [markdown] id="kRew9ECXm333"
# CATEGORICAL FEATURES
# + colab={"base_uri": "https://localhost:8080/", "height": 168} id="k9cyCzWDm3Ga" outputId="25f8e9d7-9ee5-4728-8c15-b2e4e0e2194a"
data.describe(exclude='number')
# + [markdown] id="chdCUZeWar_H"
# ## Find a Sample Song #1
# + colab={"base_uri": "https://localhost:8080/", "height": 585} id="JwFHJCeSwv4D" outputId="96547c76-751c-434b-af24-0ccae49a3cab"
# how to search a song from name of song
data[data['name'] == 'Stand by Me']
# + [markdown] id="UDjr9cKHawhm"
# ## Find a Sample Song #2
# + colab={"base_uri": "https://localhost:8080/", "height": 163} id="6a6z74PSan-Q" outputId="8ed9b9f8-b558-4bb8-f29a-eb018a09cf0f"
# how to search a song from name of song
data[data['name'] == 'Just a Friend']
# + colab={"base_uri": "https://localhost:8080/", "height": 115} id="AROprYs3LYW4" outputId="e73de499-49cc-40b9-f415-4232c36979ca"
# how to search song names from ids
# on the ML side
data[data['id']=='3yNVfRQlUPViUh8O2V9SQn'] # Biz Markie - Just a Friend
# + [markdown] id="umGiBIPpy8LL"
# ## Find a Sample Song #3
# + colab={"base_uri": "https://localhost:8080/", "height": 465} id="GI-w5pRorCiB" outputId="0bd73bdb-86d4-4d8d-c3e4-41c2d8f4799b"
# how to search a song from name of song
data[data['name'] == 'Mama Said Knock You Out']
# + [markdown] id="RgPCghzgXvR1"
# ## Setup Features for Model and Data Cleaning
#
# domain suggested features
# https://towardsdatascience.com/what-makes-a-song-likeable-dbfdb7abe404
#
# + id="R4Pb4TGBXuOR"
# only include numeric cols for now
my_features = ['artists', 'name', 'id',
'danceability', 'energy', 'speechiness', 'acousticness', 'liveness', 'valence']
songs = data[my_features].copy()
# + colab={"base_uri": "https://localhost:8080/", "height": 198} id="B6-e5Ic8X03S" outputId="f477feed-2bb6-4501-a679-406ebaf4fcbd"
songs.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 228} id="rMT0XmAeyZ5B" outputId="44902975-7c96-4e42-da93-b474ae4fccba"
# features are on scale from 0 to 10
songs.describe().T
# + [markdown] id="zc7uTFdzbsQV"
# ## Splitting the Data: making a Test Set of only 10% of the 169,909 tracks
#
# + colab={"base_uri": "https://localhost:8080/", "height": 216} id="rP5aqq9KYAm2" outputId="473407b8-81f0-45ef-98ff-0c6b60e4901b"
from sklearn.model_selection import train_test_split
# train/test split
train, test = train_test_split(songs, test_size=0.1, random_state=42) # test size from 20% to 10%
print(train.shape, test.shape)
train.head()
# + [markdown] id="CP0L83aRp5QT"
# This is the recommendation model: we just need to turn it into a function that can be called for any number of user songs.
# + colab={"base_uri": "https://localhost:8080/", "height": 199} id="RmLgpEvUcPUQ" outputId="5eb4c72c-2108-4dc4-f303-66b1887496a1"
from sklearn.neighbors import NearestNeighbors
#track_id = '3yNVfRQlUPViUh8O2V9SQn' # Just a Friend
track_id = '11d9oUiwHuYt216EFA2tiz' # ice ice baby
#track_id = '2v7ywbUzCgcVohHaKUcacV' # Like a Prayer
# Instantiate and fit knn to the correct columns
NN = NearestNeighbors(n_neighbors=10, algorithm='ball_tree')
#algorithm = 'brute','kd_tree','ball_tree'
# [3:] is to ignore the 1st 3 columns 'artist', 'name', 'id'
NN.fit(songs[songs.columns[3:]])
# take 'track id' as INPUT
song_index = songs.index[songs['id'] == track_id]
# use 'song_track_id' to find audio features
song_audio_features = songs.iloc[song_index, 3:].to_numpy()
distances, indices = NN.kneighbors(song_audio_features)
# print(type(indices))
# indices
recommended_list = list(songs.loc[indices[0], 'id'])
recommended_list
# + colab={"base_uri": "https://localhost:8080/", "height": 745} id="Fp2CPd0yaE0_" outputId="bec0bf49-ca64-4f86-c476-c8327182486c"
# this works in the notebook not sure how this shape of data will work in Flask APP or FastAPI
# how to search song names from ids
# on the ML side
for recommend in recommended_list:
recommend_song_name=songs[songs['id']==recommend]#['name']
print(recommend_song_name)
# + [markdown] id="xDFIf5K6qfUe"
# Turn it into a function
# + id="XFtW6bzkqe7b"
def recommend_song_ids(track_id):
#track_id = '3yNVfRQlUPViUh8O2V9SQn' # Just a Friend
#track_id = '11d9oUiwHuYt216EFA2tiz' # ice ice baby
#track_id = '2v7ywbUzCgcVohHaKUcacV' # Like a Prayer
# Instantiate and fit knn to the correct columns
NN = NearestNeighbors(n_neighbors=10, algorithm='ball_tree')
#algorithm = 'brute','kd_tree','ball_tree'
# [3:] is to ignore the 1st 3 columns 'artist', 'name', 'id'
NN.fit(songs[songs.columns[3:]])
# take 'track id' as INPUT
song_index = songs.index[songs['id'] == track_id]
# use 'song_track_id' to find audio features
song_audio_features = songs.iloc[song_index, 3:].to_numpy()
distances, indices = NN.kneighbors(song_audio_features)
print(type(indices))
indices
# index = neighbors[1][0]
# print(neighbors[1])
recommended_list = list(songs.loc[indices[0], 'id'])
return recommended_list
# + colab={"base_uri": "https://localhost:8080/", "height": 217} id="UqPB0DxVpeeD" outputId="6be1a81a-e0af-4c26-c18d-831f89847f71"
recommended_song_ids = recommend_song_ids('11d9oUiwHuYt216EFA2tiz')
recommended_song_ids
# + id="fj2dQNakpelY"
def recommend_song_names(track_id):
#track_id = '3yNVfRQlUPViUh8O2V9SQn' # Just a Friend
#track_id = '11d9oUiwHuYt216EFA2tiz' # ice ice baby
#track_id = '2v7ywbUzCgcVohHaKUcacV' # Like a Prayer
# Instantiate and fit knn to the correct columns
NN = NearestNeighbors(n_neighbors=10, algorithm='ball_tree')
#algorithm = 'brute','kd_tree','ball_tree'
# [3:] is to ignore the 1st 3 columns 'artist', 'name', 'id'
NN.fit(songs[songs.columns[3:]])
# take 'track id' as INPUT
song_index = songs.index[songs['id'] == track_id]
# use 'song_track_id' to find audio features
song_audio_features = songs.iloc[song_index, 3:].to_numpy()
distances, indices = NN.kneighbors(song_audio_features)
print(type(indices))
indices
# index = neighbors[1][0]
# print(neighbors[1])
recommended_list = list(songs.loc[indices[0], 'id'])
# how to search song names from ids
# on the ML side
recommended_song_list = []
for recommend in recommended_list:
#recommend_song_name=songs[songs['id']==recommend]['name']
recommend_song_name=songs[songs['id']==recommend]
recommended_song_list.append(recommend_song_name)
return recommended_song_list
# + colab={"base_uri": "https://localhost:8080/", "height": 96} id="34iV_ZVdpegd" outputId="58afccc9-0f8a-4de8-bcdd-b6f5b5e51ad2"
#double check this
# it is outputing index not song_id
output_rec_song_names = recommend_song_names('0DQd0tWurMHUAv0cMnDELH')
output_rec_song_names[0]
# for i in range(10):
# print(output_rec_song_names[i])
# + colab={"base_uri": "https://localhost:8080/", "height": 78} id="sbS0hCW3VK7j" outputId="387fa458-2c8b-4fde-fa93-5430ec071749"
output_rec_song_names[1]
# + colab={"base_uri": "https://localhost:8080/", "height": 78} id="c82o0ZNeVNfs" outputId="6915c4bb-21a5-41f9-e07e-43964dda3fb0"
output_rec_song_names[2]
# + colab={"base_uri": "https://localhost:8080/", "height": 78} id="IhrodIfAVNlI" outputId="8e5589a8-f383-4538-ac2c-4b4a35bc269e"
output_rec_song_names[3]
# + colab={"base_uri": "https://localhost:8080/", "height": 78} id="VlS6NNlOVNiS" outputId="0c5b1ed3-dc7d-4f08-9a3b-47380ad33b78"
output_rec_song_names[4]
# + colab={"base_uri": "https://localhost:8080/", "height": 78} id="a6xYHFeSVNce" outputId="0e18ad1f-4fa5-40dc-a8ab-47dacddb868f"
output_rec_song_names[5]
# + [markdown] id="oqDO5SFc_Tna"
# # Pickle Dataset songs and Model NN
#
# DATA ENGINEER will need
#
# 1. 'song_dataset.pkl'
# 2. 'recommendation_model.sav'
# 3. recommendation.py
#
#
# + id="U_JQYjiJ_0mp"
import pickle
# + id="OFKJCSW9_06I"
# Pickel Dataset songs
with open('song_dataset.pkl', 'wb') as pickle_file:
pickle.dump(songs, pickle_file)
# + id="q36s6vVZpeQp"
# Pickle Model NN
NN = NearestNeighbors(n_neighbors=10, algorithm='ball_tree')
NN.fit(songs[songs.columns[3:]])
# save the model to disk
filename = 'recommendation_model.sav'
pickle.dump(NN, open(filename, 'wb'))
# + id="C7Jr3UuS_DaR"
import pickle
import pandas as pd
#for app
# change '/content/' ====> './<name_of_folder_with__init__.py>/
filename = '/content/recommendation_model.sav'
loaded_model = pickle.load(open(filename, 'rb'))
songs_pkl = pd.read_pickle("/content/song_dataset.pkl")
def suggest_song_ids(track_id):
# from track_id to audio_features
song_index = songs_pkl.index[songs_pkl['id'] == track_id]
audio_features = songs_pkl.iloc[song_index, 3:].to_numpy()
# recommendation model
distances, indices = loaded_model.kneighbors(audio_features)
recommended_list = list(songs_pkl.loc[indices[0], 'id'])
return recommended_list
# + colab={"base_uri": "https://localhost:8080/", "height": 199} id="sdsPXWDoBgfE" outputId="6cdcba78-750c-4c0f-96b2-b9edf73dfc0c"
list_o_ids = suggest_song_ids('2MuJbBWAVewREJmB8WdGJ3')
list_o_ids
# + id="qIuqZE2KWjYr"
# + id="ir4jzIHuWjdV"
# + id="c0RAB5pbWjbK"
# + [markdown] id="8Lsv7w_MYHAj"
# This section still needs to be tested for output shape and type.
#
# It will not work in the app unless the output has the proper type and shape for rendering to HTML; a quick check follows the function definition below.
#
# + id="lxxUt51kWjWK"
def find_recommended_songs(track_id):
# from track_id to audio_features
song_index = songs_pkl.index[songs_pkl['id'] == track_id]
audio_features = songs_pkl.iloc[song_index, 3:].to_numpy()
# recommendation model
distances, indices = loaded_model.kneighbors(audio_features) #.reshape(1, -1)
recommended_list = list(songs_pkl.loc[indices[0], 'id'])
# how to search song names from ids
# on the ML side
recommended_song_list = []
for recommend in recommended_list:
recommend_song_name=songs_pkl[songs_pkl['id']==recommend]
recommended_song_list.append(recommend_song_name)
return recommended_song_list
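# + [markdown]
# A quick sanity check of the function above (a minimal sketch; the track id is the same "ice ice baby" id used earlier in this notebook): print the type and shape of what it returns before wiring it into the app.

# +
sample_recommendations = find_recommended_songs('11d9oUiwHuYt216EFA2tiz')
print(type(sample_recommendations), len(sample_recommendations))
for rec in sample_recommendations:
    # each element is a one-row slice of the songs DataFrame
    print(type(rec), rec.shape)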
# + [markdown] id="tsCOe29U_3Mt"
# <NAME>
#
# + [markdown] id="bCTyqhwaB-Lh"
# 1. ['Ice Cube'] It Was A Good Day
# 2. ['Shaggy', 'Rayvon'] Angel
# 3. ['<NAME>']
# 4. ['<NAME>'] You're the Sweetest One
# 5. ['Talking Heads'] Seen and Not Seen - 2005 Remaster
# 6. ['Chronixx'] Here Comes Trouble
# 7. ['Los Cuates de Sinaloa'] El Alamo
# + colab={"base_uri": "https://localhost:8080/", "height": 564} id="ZWsL-TgZOi69" outputId="177107d3-e7d1-42ac-e201-3f91786db326"
data[data['name'] == 'It Was A Good Day']
# + [markdown] id="nKVWF7V_DM7H"
# <NAME>
# + colab={"base_uri": "https://localhost:8080/", "height": 163} id="E8VuO2zoDc5R" outputId="b26ce9bc-ce0f-42af-bdab-3fd24802b892"
data[data['name'] == 'The Distance']
# + colab={"base_uri": "https://localhost:8080/", "height": 581} id="VLuOMhg9Dm8W" outputId="a0ce9a0b-a3e5-4b38-854a-a657d6823fad"
list_o_songs = find_recommended_songs('0xMEF2WiqKWTIG7Krjungw')
list_o_songs
# + [markdown] id="SxsNHfbmDwkc"
# 1. ['Cake'] The Distance
# 2. ['Cake'] The Distance
# 3. ['Pleasure'] Joyous
# 4. ['Kansas'] Portrait (He Knew)
# 5. ['Radio Stars'] The Real Me
# 6. ['Blaggards'] Drunken Sailor
# 7. ['Matchbox Twenty'] Real World
# 8. ['Los Cuates de Sinaloa'] La Pelota de Lolita
# 9. ['The Easybeats'] Shes So Fine
# 10. ['Seals and Crofts'] Takin' It Easy
| ml_model/SpotifySongSuggestorUnit4Build.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Display Basic Stock Information
# +
# %load_ext autoreload
# %autoreload 2
import sys
sys.path.insert(0, '../qtrader/qtrader')
import pandas as pd
import numpy as np
import tushare as ts
from AData import SHSZData, SHSZSelection
import analysis
#import draw
import matplotlib.pyplot as plt
DATA_FOLDER = '../data/SHSZ'
CODE='300628'
# -
datamanager = SHSZData(DATA_FOLDER)
equity = datamanager.get_basic(CODE)
# ## 1. Get basic OHLCV data
# +
df = datamanager.get_d(CODE)
def c_date(x):
return [pd.datetime.strptime(str(i), '%Y-%m-%d') for i in x]
df[['date']] = df[['date']].apply(c_date)
df = df.set_index('date')
# -
df.close.plot()
# ## 2. Get the distribution of daily price changes
df['changeRatio'] = analysis.gen_change_ratio(df)
# ## 3. Get the distribution of daily amplitude
df['amplitude'] = analysis.gen_amplitude(df)
# ## 4. Get price gaps
# Price gaps:
x = df.loc['2017-04-17':]
x['dump_power'] = analysis.gen_jump_powers(x)
# ## 5. Smooth the close price
smoothed_cls = analysis.gen_smoothed_close(x['close'])
smoothed_cls.shape
# ## 6. Get support and resistance lines
sup, res = analysis.gen_supres(x['close'])
# ## 7. Simple trend analysis
analysis.gen_trends(x['close'], 0.7)
# ## 8. Find stationary (turning) points
# +
analysis.gen_stational_points(x['close'], num_class=3, delta=0.009, charts=True)
# -
| ipython/EquityAnalyse.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Svyum7G3gHcK"
# ## Ungraded Lab: Convolutional Autoencoders
#
# In this lab, you will use convolution layers to build your autoencoder. This usually leads to better results than dense networks and you will see it in action with the [Fashion MNIST dataset](https://www.tensorflow.org/datasets/catalog/fashion_mnist).
# + [markdown] id="Jk0Tld-U5XFD"
# ## Imports
# + id="3EXwoz-KHtWO"
try:
# # %tensorflow_version only exists in Colab.
# %tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
import matplotlib.pyplot as plt
# + [markdown] id="e0WGuXlw5bK-"
# ## Prepare the Dataset
# + [markdown] id="aTySDKEhLNLY"
# As before, you will load the train and test sets from TFDS. Notice that we don't flatten the image this time. That's because we will be using convolutional layers later that can deal with 2D images.
# + id="t9F7YsCNIKSA"
def map_image(image, label):
'''Normalizes the image. Returns image as input and label.'''
image = tf.cast(image, dtype=tf.float32)
image = image / 255.0
return image, image
# + id="9ZsciqJXL368" colab={"base_uri": "https://localhost:8080/", "height": 371, "referenced_widgets": ["f8e320c0a49b4db1b3cdf79058f40ac2", "5ab1e33e948345e8a8a273524b2c9475", "fbab6969771c4d7fb02ea48bd7fff528", "<KEY>", "d6920ea10b804e8dbb11bb3ec103efa0", "8585bf370a9d4d82afe572197740738b", "518ca65da37545e98355bd61503ba509", "<KEY>", "32f35279c0e34a04a7fc534d6c0e9904", "<KEY>", "eec84037edd7463fb38c35402a148fc8", "1677ef9392f145a29e55611f66c455ae", "0721c372a32f46f1b4b817dea4b8b1aa", "d315f1a68ab540268ee8b4a6b55b933c", "<KEY>", "508d2d59d9ac44f1812e5d6cd745b486", "0db1d963c96549dbb9e26d4b37a8da6d", "<KEY>", "42e10f3503f64cbe8175cc37540e66bf", "b1f376ba8ed748b880e0c4901f3c51ee", "8bd5e7ca69494a6b8a1e964325416d90", "2e2d18b7ed7848489f44bf5b8edb12b5", "0dce67928dea4a6d887c0516ef1cd0d2", "<KEY>", "e9db3ec39871438dac920e55e50c3f99", "2cea6c3762a842e4aa92fa950664e893", "<KEY>", "4dd4abad7c1e4aa4bc501de2596393c4", "<KEY>", "2072d7180d6c4f1db0e7ecea47f3f9fe", "cfee4743a7ee4ffba4924b4e5afea53e", "<KEY>", "<KEY>", "26f452ab1e4f4150b6ef4e408f2739bd", "<KEY>", "dd463e610ec64fa7b1b53c6e49cc21e5", "b66d0d7551b646af8d5cc42aafe1cc00", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "fbe74dd1a5234135bb9a0bed5502b003", "<KEY>", "35ac7e69a51644b7be94364352e2d65b", "<KEY>", "<KEY>", "<KEY>", "f061b729e27640c09e9469df315a423b", "<KEY>", "84f706d6faaa4ba5af49e0d7b430224a", "<KEY>", "85cbe3fe969b462aa5cfc3e75c4de37d", "<KEY>", "e05190427d2048578b9f30c4a0cc4934", "83be79673ad14998b3f222a7978697ed"]} outputId="41c9bd2d-f6fe-44be-8c54-9957ec5ee999"
BATCH_SIZE = 128
SHUFFLE_BUFFER_SIZE = 1024
train_dataset = tfds.load('fashion_mnist', as_supervised=True, split="train")
train_dataset = train_dataset.map(map_image)
train_dataset = train_dataset.shuffle(SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE).repeat()
test_dataset = tfds.load('fashion_mnist', as_supervised=True, split="test")
test_dataset = test_dataset.map(map_image)
test_dataset = test_dataset.batch(BATCH_SIZE).repeat()
# + [markdown] id="uoyz09uKMDn5"
# ## Define the Model
# + [markdown] id="V1-Fw_qnZPV7"
# As mentioned, you will use convolutional layers to build the model. This is composed of three main parts: encoder, bottleneck, and decoder. You will follow the configuration shown in the image below.
# + [markdown] id="568W0TYyY9nl"
# <img src="https://drive.google.com/uc?export=view&id=15zh7bst9KKvciRdCvMAH7kXt3nNkABzO" width="75%" height="75%"/>
# + [markdown] id="O2IvtyIoZnb4"
# The encoder, just like in previous labs, will contract with each additional layer. The features are generated with the Conv2D layers while the max pooling layers reduce the dimensionality.
# + id="wxh8h-UMk2iL"
def encoder(inputs):
'''Defines the encoder with two Conv2D and max pooling layers.'''
conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation='relu', padding='same')(inputs)
max_pool_1 = tf.keras.layers.MaxPooling2D(pool_size=(2,2))(conv_1)
conv_2 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), activation='relu', padding='same')(max_pool_1)
max_pool_2 = tf.keras.layers.MaxPooling2D(pool_size=(2,2))(conv_2)
return max_pool_2
# + [markdown] id="g9KQYnabazLl"
# A bottleneck layer is used to get more features but without further reducing the dimension afterwards. Another layer is inserted here for visualizing the encoder output.
# + id="wRWmLA3VliDr"
def bottle_neck(inputs):
'''Defines the bottleneck.'''
bottle_neck = tf.keras.layers.Conv2D(filters=256, kernel_size=(3,3), activation='relu', padding='same')(inputs)
encoder_visualization = tf.keras.layers.Conv2D(filters=1, kernel_size=(3,3), activation='sigmoid', padding='same')(bottle_neck)
return bottle_neck, encoder_visualization
# + [markdown] id="FayvcE3ebZxk"
# The decoder will upsample the bottleneck output back to the original image size.
# + id="XZgLt5uAmArk"
def decoder(inputs):
'''Defines the decoder path to upsample back to the original image size.'''
conv_1 = tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), activation='relu', padding='same')(inputs)
up_sample_1 = tf.keras.layers.UpSampling2D(size=(2,2))(conv_1)
conv_2 = tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation='relu', padding='same')(up_sample_1)
up_sample_2 = tf.keras.layers.UpSampling2D(size=(2,2))(conv_2)
conv_3 = tf.keras.layers.Conv2D(filters=1, kernel_size=(3,3), activation='sigmoid', padding='same')(up_sample_2)
return conv_3
# + [markdown] id="Dvfhvk9qbvCp"
# You can now build the full autoencoder using the functions above.
# + id="fQKwO64iiOYl"
def convolutional_auto_encoder():
'''Builds the entire autoencoder model.'''
inputs = tf.keras.layers.Input(shape=(28, 28, 1,))
encoder_output = encoder(inputs)
bottleneck_output, encoder_visualization = bottle_neck(encoder_output)
decoder_output = decoder(bottleneck_output)
model = tf.keras.Model(inputs =inputs, outputs=decoder_output)
encoder_model = tf.keras.Model(inputs=inputs, outputs=encoder_visualization)
return model, encoder_model
# + id="1MmS7r0tkuIf" colab={"base_uri": "https://localhost:8080/"} outputId="8fd022d8-6030-4805-b2ac-95171d5d0814"
convolutional_model, convolutional_encoder_model = convolutional_auto_encoder()
convolutional_model.summary()
# + [markdown] id="5FRxRr0LMLCs"
# ## Compile and Train the model
# + id="J0Umj_xaiHL_" colab={"base_uri": "https://localhost:8080/"} outputId="dc4a8c0d-8d48-4c91-c243-9b46f8e2ce16"
train_steps = 60000 // BATCH_SIZE
valid_steps = 60000 // BATCH_SIZE
convolutional_model.compile(optimizer=tf.keras.optimizers.Adam(), loss='binary_crossentropy')
conv_model_history = convolutional_model.fit(train_dataset, steps_per_epoch=train_steps, validation_data=test_dataset, validation_steps=valid_steps, epochs=40)
# + [markdown] id="-8zE9OiAMUd7"
# ## Display sample results
# + [markdown] id="DCUOM7F_cf26"
# As usual, let's see some sample results from the trained model.
# + id="A35RlIqKIsQv"
def display_one_row(disp_images, offset, shape=(28, 28)):
'''Display sample outputs in one row.'''
for idx, test_image in enumerate(disp_images):
plt.subplot(3, 10, offset + idx + 1)
plt.xticks([])
plt.yticks([])
test_image = np.reshape(test_image, shape)
plt.imshow(test_image, cmap='gray')
def display_results(disp_input_images, disp_encoded, disp_predicted, enc_shape=(8,4)):
'''Displays the input, encoded, and decoded output values.'''
plt.figure(figsize=(15, 5))
display_one_row(disp_input_images, 0, shape=(28,28,))
display_one_row(disp_encoded, 10, shape=enc_shape)
display_one_row(disp_predicted, 20, shape=(28,28,))
# + id="qtQyQRxRN_hH" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="46bd3c11-e102-4d9b-8d11-6bea7c7624de"
# take 1 batch of the dataset
test_dataset = test_dataset.take(1)
# take the input images and put them in a list
output_samples = []
for input_image, image in tfds.as_numpy(test_dataset):
output_samples = input_image
# pick 10 indices
idxs = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
# prepare test samples as a batch of 10 images
conv_output_samples = np.array(output_samples[idxs])
conv_output_samples = np.reshape(conv_output_samples, (10, 28, 28, 1))
# get the encoder ouput
encoded = convolutional_encoder_model.predict(conv_output_samples)
# get a prediction for some values in the dataset
predicted = convolutional_model.predict(conv_output_samples)
# display the samples, encodings and decoded values!
display_results(conv_output_samples, encoded, predicted, enc_shape=(7,7))
| Generative Deep Learning with TensorFlow/Week 2 AutoEncoders/Lab_4_FashionMNIST_CNNAutoEncoder.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from tensorflow.keras.models import load_model
import cv2 as cv
import numpy as np
face=cv.CascadeClassifier("haarcascade_frontalface_default.xml")
model=load_model("15_points_model.h5")
def preprocessing(images):
    # Scale pixel values to [0, 1], cast to float32, and reshape to the model input (1, 96, 96, 1)
    images = images / 255.0
    images = images.astype(np.float32)
    return images.reshape(1, 96, 96, 1)
# +
cam=cv.VideoCapture(0)
while True:
ret,frame=cam.read()
temp=frame.copy()
img_copy=frame.copy()
faces=face.detectMultiScale(temp)
for (x,y,w,h) in faces:
#cv.rectangle(temp,(x,y),(x+z,y+w),color=(0,255,255))
#cv.imshow('frame',temp)
try:
new_frame=frame[y:y+h,x:x+w,:].copy()
#cv.imshow('abc', new_frame)
original=new_frame.shape
new_frame=cv.resize(new_frame,(96,96))
#cv.imshow('new',new_frame)
gray_frame=cv.cvtColor(new_frame,cv.COLOR_BGR2GRAY)
gray_frame=preprocessing(gray_frame)
output=model.predict(gray_frame)
width_original = new_frame.shape[1]
height_original = new_frame.shape[0]
keypoints = output[0]
x_coords = keypoints[0::2] # Read alternate elements starting from index 0
y_coords = keypoints[1::2] # Read alternate elements starting from index 1
x_coords_denormalized = (x_coords+0.5)*width_original # Denormalize x-coordinate
y_coords_denormalized = (y_coords+0.5)*height_original # Denormalize y-coordinate
'''
left_lip_coords = (int(x_coords_denormalized[11]), int(y_coords_denormalized[11]))
right_lip_coords = (int(x_coords_denormalized[12]), int(y_coords_denormalized[12]))
top_lip_coords = (int(x_coords_denormalized[13]), int(y_coords_denormalized[13]))
bottom_lip_coords = (int(x_coords_denormalized[14]), int(y_coords_denormalized[14]))
left_eye_coords = (int(x_coords_denormalized[3]), int(y_coords_denormalized[3]))
right_eye_coords = (int(x_coords_denormalized[5]), int(y_coords_denormalized[5]))
brow_coords = (int(x_coords_denormalized[6]), int(y_coords_denormalized[6]))
'''
left_lip_coordsx = int((output[0][22])*48+48)
left_lip_coordsy = int((output[0][23])*48+48)
right_lip_coordsx = int((output[0][24])*48+48)
right_lip_coordsy = int((output[0][25])*48+48)
upper_lip_coordsx = int((output[0][26])*48+48)
upper_lip_coordsy = int((output[0][27])*48+48)
# Scale filter according to keypoint coordinates
beard_width = right_lip_coordsx - left_lip_coordsx
#glasses_width = right_eye_coords[0] - left_eye_coords[0]
img_copy = cv.cvtColor(img_copy, cv.COLOR_BGR2BGRA)
santa_filter = cv.imread('santa_filter.png', -1)
santa_filter = cv.resize(santa_filter, (int(abs(beard_width*7)),170))
sw,sh,sc = santa_filter.shape
for i in range(0,sw): # Overlay the filter based on the alpha channel
for j in range(0,sh):
if santa_filter[i,j][3] != 0:
#img_copy[left_lip_coordsx+j+x-60 , upper_lip_coordsy+i+y-20] = santa_filter[i,j]
img_copy[upper_lip_coordsy+i+y+40, left_lip_coordsx+j+x-62] = santa_filter[i,j]
#print()
'''
glasses = cv.imread('glasses.png', -1)
glasses = cv.resize(glasses, (int(abs(glasses_width*2)),150))
gw,gh,gc = glasses.shape
for i in range(0,gw): # Overlay the filter based on the alpha channel
for j in range(0,gh):
if glasses[i,j][3] != 0:
img_copy[brow_coords[1]+i+y-50, left_eye_coords[0]+j+x-60] = glasses[i,j]
'''
diwali = cv.imread('diwali6.png', -1)
#diwali = cv.cvtColor(diwali, cv.COLOR_BGR2BGRA)
#diwali1 = cv.resize(diwali, (240,160))
cv.imshow('def' , diwali)
dw,dh,dc = diwali.shape
left_brow_outer_coordsx = int((output[0][14])*48+48)
left_brow_outer_coordsy = int((output[0][15])*48+48)
for i in range(0,dw): # Overlay the filter based on the alpha channel
for j in range(0,dh):
if diwali[i,j][3] != 0:
img_copy[left_brow_outer_coordsy+i+y, left_brow_outer_coordsx+j+x + 60] = diwali[i,j]
print(diwali.shape)
cv.imshow('Op', img_copy)
#for i in range(0,len(output[0]),2):
# cv.circle(img=new_frame,
# center=(int((output[0][i])*48+48),int((output[0][i+1])*48+48)),
# radius=2,color=(255,255,255))
#cv.imshow('output',new_frame)
cv.circle(img=new_frame,
center=(upper_lip_coordsx,upper_lip_coordsy),
radius=2,color=(255,255,255))
cv.circle(img=new_frame,
center=(right_lip_coordsx,right_lip_coordsy),
radius=2,color=(255,255,255))
cv.circle(img=new_frame,
center=(left_lip_coordsx,left_lip_coordsy),
radius=2,color=(255,255,255))
frame[y:y+h, x:x+w,:]=cv.resize(new_frame,tuple(original)[:-1])
cv.imshow('frame1',frame)
except:
pass
if(cv.waitKey(1)&0xff==ord('q')):
break
cam.release()
cv.destroyAllWindows()
# -
| Facial landmark 15.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Package Better with Conda Build 3
# Handling version compatibility is one of the hardest challenges in building software. Up to now, conda-build has provided helpful tools for constraining or pinning versions in recipes. The limitation of this capability was that it entailed editing a lot of recipes. Conda-build 3 introduces a new scheme for controlling version constraints, which enhances behavior in two ways. First, you can now set versions in an external file, and you can provide lists of versions for conda-build to loop over. Matrix builds are now much simpler and no longer require an external tool, such as conda-build-all. Second, several new Jinja2 functions have been added, which allow recipe authors to express their constraints relative to the versions of packages installed at build time. This dynamic expression greatly cuts down on the need to edit recipes.
#
# Each of these developments have enabled interesting new capabilities for cross-compiling, as well as improving package compatibility by adding more intelligent constraints.
# This document is intended as a quick overview of new features in conda-build 3. For more information, see the docs PR at https://conda.io/docs/building/variants.html
#
# These demos use conda-build's python API to render and build recipes. That API currently does not have a docs page, but is pretty self explanatory. See the source at https://github.com/conda/conda-build/blob/master/conda_build/api.py
#
# This jupyter notebook itself is included in conda-build's tests folder. If you're interested in running this notebook yourself, see the tests/test-recipes/variants folder in a git checkout of the conda-build source. Tests are not included with conda packages of conda-build.
from conda_build import api
import os
from pprint import pprint
# First, set up some helper functions that will output recipe contents in a nice-to-read way:
def print_yamls(recipe, **kwargs):
yamls = [api.output_yaml(m[0])
for m in api.render(recipe, verbose=False, permit_unsatisfiable_variants=True, **kwargs)]
for yaml in yamls:
print(yaml)
print('-' * 50)
def print_outputs(recipe, **kwargs):
pprint(api.get_output_file_paths(recipe, verbose=False, **kwargs))
# Most of the new functionality revolves around much more powerful use of jinja2 templates. The core idea is that there is now a separate configuration file that can be used to insert many different entries into your meta.yaml files.
# !cat 01_basic_templating/meta.yaml
# The configuration is hierarchical - it can draw from many config files. One place they can live is alongside meta.yaml:
# !cat 01_basic_templating/conda_build_config.yaml
# Since we have one slot in meta.yaml, and two values for that one slot, we should end up with two output packages:
print_outputs('01_basic_templating/')
print_yamls('01_basic_templating/')
# OK, that's fun already. But wait, there's more!
#
# We saw a warning about "finalization." That's conda-build trying to figure out exactly what packages are going to be installed for the build process. This is all determined before the build. Doing so allows us to tell you the actual output filenames before you build anything. Conda-build will still render recipes if some dependencies are unavailable, but you obviously won't be able to actually build that recipe.
# !cat 02_python_version/meta.yaml
# !cat 02_python_version/conda_build_config.yaml
print_yamls('02_python_version/')
# Here you see that we have many more dependencies than we specified, and we have much more detailed pinning. This is a finalized recipe. It represents exactly the state that would be present for building (at least on the current platform).
#
# So, this new way to pass versions is very fun, but there's a lot of code out there that uses the older way of doing things - environment variables and CLI arguments. Those still work. They override any conda_build_config.yaml settings.
# Setting environment variables overrides the conda_build_config.yaml. This preserves older, well-established behavior.
os.environ["CONDA_PY"] = "3.4"
print_yamls('02_python_version/')
del os.environ['CONDA_PY']
# Passing python as an argument (on the CLI or to the API) also overrides conda_build_config.yaml.
print_yamls('02_python_version/', python="3.6")
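# You can also feed variant values directly through the API as a dictionary. This is a sketch that assumes the ``variants`` parameter of api.render (reached here through print_yamls' **kwargs); values are lists, one entry per desired variant, and should take precedence over conda_build_config.yaml for the same key.
print_yamls('02_python_version/', variants={'python': ['3.6']})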
# Wait a minute - what is that ``h7d013e7`` gobbledygook in the build/string field?
#
# Conda-build 3 aims to generalize pinning/constraints, and those constraints are what differentiate packages. In the past we had identifiers like py27np111 in filenames; the hash is the same idea, just generalized. Since we can't readily put every possible constraint into the filename, the old identifiers are kept, and the hash is added as a general solution.
#
# There's more information about what goes into a hash at https://conda.io/docs/building/variants.html#differentiating-packages-built-with-different-variants
#
# Let's take a look at how to inspect the hash contents of a built package.
outputs = api.build('02_python_version/', python="3.6",
anaconda_upload=False)
pkg_file = outputs[0]
print(pkg_file)
# Using the command line here just to show you that this command exists.
# IPython expands {pkg_file} to the path of the package built above.
# !conda inspect hash-inputs {pkg_file}
# pin_run_as_build is a special extra key in the config file. It is a generalization of the ``x.x`` concept that existed for numpy since 2015. There's more information at https://conda.io/docs/building/variants.html#customizing-compatibility
#
# Each x indicates another level of pinning in the output recipe. Let's take a look at how we can control the relationship of these constraints. Before now, you could certainly accomplish pinning; it just took more work. Now you can define your pinning expressions and change your target versions in only one config file.
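# For reference, a pin_run_as_build entry in conda_build_config.yaml looks roughly like this (a hypothetical sketch with a made-up package choice, not necessarily the demo's config):
#
#     pin_run_as_build:
#       libpng:
#         max_pin: x.x
#
# With that in place, a recipe that lists libpng in both its build and run requirements gets its run entry pinned to the x.x (major.minor) series of whatever libpng version was used at build time.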
# !cat 05_compatible/meta.yaml
# This is effectively saying "add a runtime libpng constraint that follows conda-build's default behavior, relative to the version of libpng that was used at build time"
#
# pin_compatible is a new helper function available to you in meta.yaml. The default behavior is an exact-version lower bound ("x.x.x.x.x.x.x") and a next-major-version upper bound ("x").
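# In a recipe, that looks roughly like the following (a hypothetical snippet, not necessarily identical to the 05_compatible recipe shown above):
#
#     requirements:
#       build:
#         - libpng
#       run:
#         - {{ pin_compatible('libpng') }}
#
# If libpng 1.6.34 were present at build time, the default expression would render to something like ``libpng >=1.6.34,<2``.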
print_yamls('05_compatible/')
# These constraints are completely customizable with pinning expressions:
# !cat 06_compatible_custom/meta.yaml
print_yamls('06_compatible_custom/')
# Finally, you can also manually specify version bounds. These supersede any relative constraints.
# !cat 07_compatible_custom_lower_upper/meta.yaml
print_yamls('07_compatible_custom_lower_upper/')
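# For reference, both kinds of customization are expressed as arguments to pin_compatible. A rough sketch (hypothetical values, and assuming the standard keyword arguments min_pin/max_pin and lower_bound/upper_bound):
#
#     run:
#       - {{ pin_compatible('libpng', max_pin='x.x') }}                        # pinning expression
#       - {{ pin_compatible('libpng', lower_bound='1.5', upper_bound='1.7') }} # explicit bounds
#
# The two entries are alternatives; a real recipe would use one or the other for a given package.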
# Much of the development of conda-build 3 has been motivated by improving the compiler toolchain situation. Conda-build 3 adds special support for more dynamic specification of compilers.
# !cat 08_compiler/meta.yaml
# By replacing any actual compiler with this jinja2 function, we're free to swap in different compilers based on the contents of the conda_build_config.yaml file (or other variant configuration). Rather than saying "I need gcc," we are saying "I need a C compiler."
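# The pattern in a recipe is roughly (a hypothetical snippet):
#
#     requirements:
#       build:
#         - {{ compiler('c') }}
#
# The variant configuration then decides which concrete compiler package that resolves to.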
#
# This makes recipes much more dynamic, and it helps conda-build keep your recipes consistent with respect to runtimes. We're also free to keep compilation and linking flags associated with specific "compiler" packages - allowing us to build against potentially multiple configurations (Release, Debug?). With cross compilers, we could also build for other platforms.
# !cat 09_cross/meta.yaml
# But by adding a base compiler name and target platforms, we can make a build matrix.
# This is not magic: the compiler packages must already exist. Conda-build is only following a naming scheme - for example, a c_compiler of gcc with a target_platform of linux-64 resolves to a build dependency named gcc_linux-64.
# !cat 09_cross/conda_build_config.yaml
print_yamls('09_cross/')
# Finally, remembering to add runtime dependencies is a frequent problem. Sometimes the recipe author is not entirely familiar with the lower-level code and doesn't know which runtime dependencies it needs. Other times, it's just a pain to keep versions of runtime dependencies in line. Conda-build 3 introduces a way of storing the required runtime dependencies *on the package providing the dependency at build time.*
#
# For example, using g++ in a non-static configuration will require that the end-user have a sufficiently new libstdc++ runtime library available at runtime. Many people don't currently include this in their recipes. Sometimes the system libstdc++ is adequate, but often not. By imposing the downstream dependency, we can make sure that people don't forget the runtime dependency.
# First, a package that provides some library.
# When anyone uses this library, they need to include the appropriate runtime.
# !cat 10_runtimes/uses_run_exports/meta.yaml
# This is the simple downstream package that uses the library provided in the previous recipe.
# !cat 10_runtimes/consumes_exports/meta.yaml
# Let's build the library package first.
api.build('10_runtimes/uses_run_exports', anaconda_upload=False)
print_yamls('10_runtimes/consumes_exports')
# In the above recipe, note that bzip2 has been added as a runtime dependency, and is pinned according to conda-build's default pin_compatible scheme. This behavior can be overridden in recipes if necessary, but we hope it will prove useful.
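# For reference, a library package can declare its runtime export in its own meta.yaml roughly like this (a hypothetical sketch, not the contents of the demo recipes above):
#
#     build:
#       run_exports:
#         - {{ pin_subpackage('libthing', max_pin='x.x') }}
#
# Any package that then uses libthing as a build dependency automatically picks up a matching, pinned run dependency on it.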