| column | dtype | string lengths |
|---|---|---|
| markdown | string | 0 – 1.02M |
| code | string | 0 – 832k |
| output | string | 0 – 1.02M |
| license | string | 3 – 36 |
| path | string | 6 – 265 |
| repo_name | string | 6 – 127 |
pun wunnayook
pun_wunayook('กา')
pun_wunayook('กาไปไหน')
pun_wunayook('ขาวจังเลย')
# case_1 is a list of test phrases defined earlier in the notebook
for k in case_1:
    print(k)
    print(pun_wunayook(k))
    print('===========')
WARNING:root:มะ with tone 0 not availabe (Dead word type), return normalize WARNING:root:ดุ๊ด with tone 0 not availabe (Dead word type), return normalize WARNING:root:removing taikoo from เป็น WARNING:root:removing taikoo from เป็น WARNING:root:removing taikoo from เป็น WARNING:root:removing taikoo from เป็น
MIT
notebooks/Example.ipynb
Theerit/kampuan_api
load a model
# load json and create model # load json and create model def load_model(filename, weights): with open(filename, 'r') as json: # cnn_transfer_augm loaded_model_json = json.read() loaded_model = model_from_json(loaded_model_json) # load weights into new model loaded_model.load_weights(weights) print("Loaded model from disk") optimizer = optimizers.Adam(lr=0.001) loaded_model.compile(loss = "categorical_crossentropy", optimizer = optimizer, metrics=['accuracy', 'mean_squared_error','categorical_crossentropy','top_k_categorical_accuracy']) print('compiled model') return loaded_model model_augment = config.dataset_dir + 'models/cnntransfer_augm.json' model_augment_weights = config.dataset_dir + 'models/cnntransferweights_augmen.h5' model_default = config.dataset_dir + 'models/cnntransfer.json' model_default_weights = config.dataset_dir + 'models/cnntransferweights.h5' # augment = load_model(model_augment, model_augment_weights) default = load_model(model_default, model_default_weights) augment = load_model(model_augment, model_augment_weights) # pick the n classes with the most occuring instances amt = 5 classes = data.top_classes(dataset.labels, amt) classes maxx = 100 max_train = 100 x_test, n = data.extract_topx_classes(dataset, classes, 'test', maxx, max_train) n x_test, y_test, n = data.extract_all_test(dataset, x_test) # y_train, y_test, y_validation = data.labels_to_vectors(dataset, y_train, y_test, y_validation) y_test = data.one_hot(y_test) input_shape = y_test.shape[1:] # = shape of an individual image (matrix) output_length = (y_test[0]).shape[0] # = length of an individual label output_length
_____no_output_____
MIT
src/nn/transfer learning-plots.ipynb
voschezang/trash-image-classification
running tests
# import sklearn.metrics.confusion_matrix def evaluate(model): cvscores = [] scores = model.evaluate(x_test, y_test, verbose=0) print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100)) cvscores.append(scores[1] * 100) print("%.2f%% (+/- %.2f%%)" % (np.mean(cvscores), np.std(cvscores))) # evaluate(model_final_augmentation) import tensorflow as tf from sklearn.metrics import confusion_matrix def test1(model, x_test, y_test): y_pred_class = model.predict(x_test) # con = tf.confusion_matrix(labels=y_test, predictions=y_pred_class ) # print(con) y_test_non_category = [ np.argmax(t) for t in y_test ] y_predict_non_category = [ np.argmax(t) for t in y_pred_class ] conf_mat = confusion_matrix(y_test_non_category, y_predict_non_category) print(conf_mat) return conf_mat c1 = test1(default, x_test, y_test) c2 = test1(augment, x_test, y_test) # http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html # comparable but different from: mlxtend.plotting.plot_confusion_matrix import itertools import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix # import some data to play with iris = datasets.load_iris() X = iris.data y = iris.target class_names = iris.target_names # Split the data into a training set and a test set X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) # Run classifier, using a model that is too regularized (C too low) to see # the impact on the results classifier = svm.SVC(kernel='linear', C=0.01) y_pred = classifier.fit(X_train, y_train).predict(X_test) def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') labels = np.array(['Glass','Paper','Cardboard','Plastic','Metal']) labels = np.array(['Paper', 'Glass', 'Plastic', 'Metal', 'Cardboard']) from matplotlib import rcParams rcParams['font.family'] = 'sans-serif' rcParams['font.sans-serif'] = [ 'Times New Roman', 'Tahoma', 'DejaVu Sans', 'Lucida Grande', 'Verdana' ] rcParams['font.size'] = 12 labels c_ = c1 c1 = np.array([[29, 1, 1, 24, 5], [ 0, 41, 2, 1, 16], [ 0, 11, 34, 1, 14], [ 0, 0, 0, 55, 5], [ 0, 5, 0, 4, 51]]) # from mlxtend.plotting import plot_confusion_matrix plt.figure() plot_confusion_matrix(c1, labels, title='Confusion matrix - default') c2 = np.array([[ 3, 2, 5, 85, 5], [ 0, 76, 19, 2, 3,], [ 0, 13, 72, 9, 6,], [ 0, 0, 2, 95, 3,], [ 0, 36, 22, 5, 37]]) # from mlxtend.plotting import plot_confusion_matrix plt.figure() plot_confusion_matrix(c2, labels, title='Confusion matrix - augmented')
Confusion matrix, without normalization [[ 3 2 5 85 5] [ 0 76 19 2 3] [ 0 13 72 9 6] [ 0 0 2 95 3] [ 0 36 22 5 37]]
MIT
src/nn/transfer learning-plots.ipynb
voschezang/trash-image-classification
T-test for the true positives (TP) per class, between the 2 networks
tp_c1 = c1.diagonal() tp_c2 = c2.diagonal() print(tp_c1) print(tp_c2) from utils import utils utils.ttest(0.05, tp_c1, tp_c2) utils.ttest(0.05, tp_c1.flatten(), tp_c2.flatten()) def select_not_diagonal(arr=[]): a = arr.copy() np.fill_diagonal(a, -1) return [x for x in list(a.flatten()) if x > -1] # everything nog at the diagonal axes is either fp or fn # with fn or fp depending on the perspective (which class == p) c1_ = select_not_diagonal(c1) c2_ = select_not_diagonal(c2) print(c1_) print(c2_) utils.ttest(0.05, c1_, c2_) def recall_precision(cm=[[]]): print('label, recall, precision') total = sum(cm.flatten()) for i, label in enumerate(labels): # e.g. label = paper true_paper = cm[i] tp = cm[i][i] # upper left corner fp = sum(cm[i]) - tp # upper col minus tp # vertical col col = [row[i] for row in cm ] fn = sum(col) - tp tn = total - tp - fp - fn print(label, ':', round(tp * 1./ (tp + fn),3), round(tp * 1./ (tp + fp),3)) # print(round(tp * 1./ (tp + fp),3)) print('c1 - no aug') recall_precision(c1) print('c2 - aug') recall_precision(c2)
c1 - no aug label, recall, precision Paper : 1.0 0.483 Glass : 0.707 0.683 Plastic : 0.919 0.567 Metal : 0.647 0.917 Cardboard : 0.56 0.85 c2 - aug label, recall, precision Paper : 1.0 0.03 Glass : 0.598 0.76 Plastic : 0.6 0.72 Metal : 0.485 0.95 Cardboard : 0.685 0.37
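The `utils.ttest` helper above is project-specific and its source is not shown here. A rough stand-in using SciPy, under the assumption that the helper runs an independent two-sample t-test at the given significance level `alpha`:

```python
import numpy as np
from scipy import stats

def ttest(alpha, a, b):
    """Independent two-sample t-test (assumed stand-in for the project's utils.ttest)."""
    t_stat, p_value = stats.ttest_ind(a, b)
    print(f"t = {t_stat:.3f}, p = {p_value:.3f}")
    if p_value < alpha:
        print("difference is significant at alpha =", alpha)
    else:
        print("no significant difference at alpha =", alpha)
    return t_stat, p_value

tp_c1 = np.array([29, 41, 34, 55, 51])  # diagonal of c1 (true positives per class)
tp_c2 = np.array([3, 76, 72, 95, 37])   # diagonal of c2
ttest(0.05, tp_c1, tp_c2)
```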
MIT
src/nn/transfer learning-plots.ipynb
voschezang/trash-image-classification
Module 2: Playing with PyTorch: linear regression
import matplotlib.pyplot as plt
%matplotlib inline
import torch
import numpy as np
torch.__version__
_____no_output_____
Apache-2.0
Module2/02b_linear_reg.ipynb
GenBill/notebooks
Warm-up: Linear regression with numpy

Our model is:
$$y_t = 2x^1_t-3x^2_t+1, \quad t\in\{1,\dots,30\}$$
Our task, given the 'observations' $(x_t,y_t)_{t\in\{1,\dots,30\}}$, is to recover the weights $w^1=2, w^2=-3$ and the bias $b = 1$.

In order to do so, we will solve the following optimization problem:
$$\underset{w^1,w^2,b}{\operatorname{argmin}} \sum_{t=1}^{30} \left(w^1x^1_t+w^2x^2_t+b-y_t\right)^2$$
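This quadratic problem has a closed-form least-squares solution, which makes a quick sanity check before we implement gradient descent. A minimal, self-contained sketch (independent of the notebook's variables) that recovers $w^1=2$, $w^2=-3$, $b=1$:

```python
import numpy as np
from numpy.random import random

x = random((30, 2))                        # 30 observations of (x^1, x^2)
y = np.dot(x, [2., -3.]) + 1.              # noiseless targets

X = np.column_stack([x, np.ones(len(x))])  # design matrix [x^1, x^2, 1]
coef, *_ = np.linalg.lstsq(X, y, rcond=None)
print(coef)                                # ~ [ 2. -3.  1.] : (w^1, w^2, b)
```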
import numpy as np from numpy.random import random # generate random input data x = random((30,2)) # generate labels corresponding to input data x y = np.dot(x, [2., -3.]) + 1. w_source = np.array([2., -3.]) b_source = np.array([1.]) print(x.shape) print(y.shape) print(np.array([2., -3.]).shape) print(x[-5:]) print(x[:5]) import matplotlib.pyplot as plt import numpy as np from mpl_toolkits.mplot3d import Axes3D def plot_figs(fig_num, elev, azim, x, y, weights, bias): fig = plt.figure(fig_num, figsize=(4, 3)) plt.clf() ax = Axes3D(fig, elev=elev, azim=azim) ax.scatter(x[:, 0], x[:, 1], y) ax.plot_surface(np.array([[0, 0], [1, 1]]), np.array([[0, 1], [0, 1]]), (np.dot(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]).T, weights) + bias).reshape((2, 2)), alpha=.5) ax.set_xlabel('x_1') ax.set_ylabel('x_2') ax.set_zlabel('y') def plot_views(x, y, w, b): #Generate the different figures from different views elev = 43.5 azim = -110 plot_figs(1, elev, azim, x, y, w, b[0]) plt.show() plot_views(x, y, w_source, b_source)
_____no_output_____
Apache-2.0
Module2/02b_linear_reg.ipynb
GenBill/notebooks
In vector form, we define:
$$\hat{y}_t = {\bf w}^T{\bf x}_t+b$$
and we want to minimize the loss given by:
$$loss = \sum_t\underbrace{\left(\hat{y}_t-y_t \right)^2}_{loss_t}.$$
To minimize the loss we first compute the gradient of each $loss_t$:
\begin{eqnarray*}
\frac{\partial{loss_t}}{\partial w^1} &=& 2x^1_t\left({\bf w}^T{\bf x}_t+b-y_t \right)\\
\frac{\partial{loss_t}}{\partial w^2} &=& 2x^2_t\left({\bf w}^T{\bf x}_t+b-y_t \right)\\
\frac{\partial{loss_t}}{\partial b} &=& 2\left({\bf w}^T{\bf x}_t+b-y_t \right)
\end{eqnarray*}
Note that the actual gradient of the loss is given by:
$$\frac{\partial{loss}}{\partial w^1} =\sum_t \frac{\partial{loss_t}}{\partial w^1},\quad
\frac{\partial{loss}}{\partial w^2} =\sum_t \frac{\partial{loss_t}}{\partial w^2},\quad
\frac{\partial{loss}}{\partial b} =\sum_t \frac{\partial{loss_t}}{\partial b}$$
For one epoch, **(Batch) Gradient Descent** updates the weights and bias as follows:
\begin{eqnarray*}
w^1_{new}&=&w^1_{old}-\alpha\frac{\partial{loss}}{\partial w^1} \\
w^2_{new}&=&w^2_{old}-\alpha\frac{\partial{loss}}{\partial w^2} \\
b_{new}&=&b_{old}-\alpha\frac{\partial{loss}}{\partial b},
\end{eqnarray*}
and then we run several epochs.
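The per-sample formulas above can be sanity-checked numerically. A small sketch comparing the analytic $\partial loss_t/\partial w^1$ with a central finite-difference estimate at an arbitrary point (all values here are made up for illustration):

```python
import numpy as np

x_t_ = np.array([0.3, 0.7])   # one sample (x^1_t, x^2_t)
y_t_ = 0.5                    # its target
w = np.array([1.0, -2.0])     # arbitrary parameters
b = 0.2

def loss_t(w1):
    # loss_t as a function of w^1 only, with w^2 and b held fixed
    return (w1 * x_t_[0] + w[1] * x_t_[1] + b - y_t_) ** 2

analytic = 2 * x_t_[0] * (w.dot(x_t_) + b - y_t_)   # formula from the text
eps = 1e-6
numeric = (loss_t(w[0] + eps) - loss_t(w[0] - eps)) / (2 * eps)
print(analytic, numeric)   # the two values should agree to ~6 decimals
```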
# randomly initialize learnable weights and bias w_init = random(2) b_init = random(1) w = w_init b = b_init print("initial values of the parameters:", w, b ) # our model forward pass def forward(x): return x.dot(w)+b # Loss function def loss(x, y): y_pred = forward(x) return (y_pred - y)**2 print("initial loss:", np.sum([loss(x_val,y_val) for x_val, y_val in zip(x, y)]) ) # compute gradient def gradient(x, y): # d_loss/d_w, d_loss/d_c return 2*(x.dot(w)+b - y)*x, 2 * (x.dot(w)+b - y) learning_rate = 1e-2 # Training loop for epoch in range(10): grad_w = np.array([0,0]) grad_b = np.array(0) l = 0 for x_val, y_val in zip(x, y): grad_w = np.add(grad_w,gradient(x_val, y_val)[0]) grad_b = np.add(grad_b,gradient(x_val, y_val)[1]) l += loss(x_val, y_val) w = w - learning_rate * grad_w b = b - learning_rate * grad_b print("progress:", "epoch:", epoch, "loss",l[0]) # After training print("estimation of the parameters:", w, b) plot_views(x, y, w, b)
_____no_output_____
Apache-2.0
Module2/02b_linear_reg.ipynb
GenBill/notebooks
Linear regression with tensors
dtype = torch.FloatTensor
print(dtype)
# dtype = torch.cuda.FloatTensor # Uncomment this to run on GPU

x_t = torch.from_numpy(x).type(dtype)
y_t = torch.from_numpy(y).type(dtype).unsqueeze(1)

print(y.shape)
print(torch.from_numpy(y).type(dtype).shape)
print(y_t.shape)
(30,) torch.Size([30]) torch.Size([30, 1])
Apache-2.0
Module2/02b_linear_reg.ipynb
GenBill/notebooks
This is an implementation of **(Batch) Gradient Descent** with tensors. Note that in the main loop, the functions `loss_t` and `gradient_t` are always called with the same inputs: they can easily be incorporated into the loop (we'll do that below).
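The tensor version replaces the per-sample loop with matrix algebra: summing the per-sample gradients $2x_t({\bf w}^T{\bf x}_t+b-y_t)$ over $t$ is the same as the single product $2X^T(Xw+b-y)$. A small numpy sketch of that equivalence:

```python
import numpy as np

X = np.random.rand(30, 2)
y = X.dot([2., -3.]) + 1.
w = np.random.rand(2)
b = 0.1

residual = X.dot(w) + b - y                              # shape (30,)
grad_loop = sum(2 * r * x for x, r in zip(X, residual))  # per-sample sum
grad_matrix = 2 * X.T.dot(residual)                      # vectorized form
print(np.allclose(grad_loop, grad_matrix))               # True
```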
w_init_t = torch.from_numpy(w_init).type(dtype) b_init_t = torch.from_numpy(b_init).type(dtype) w_t = w_init_t.clone() w_t.unsqueeze_(1) b_t = b_init_t.clone() b_t.unsqueeze_(1) print("initial values of the parameters:\n", w_t, b_t ) # our model forward pass def forward_t(x): return x.mm(w_t)+b_t # Loss function def loss_t(x, y): y_pred = forward_t(x) return (y_pred - y).pow(2).sum() # compute gradient def gradient_t(x, y): # d_loss/d_w, d_loss/d_c return 2*torch.mm(torch.t(x),x.mm(w_t)+b_t - y), 2 * (x.mm(w_t)+b_t - y).sum() learning_rate = 1e-2 for epoch in range(10): l_t = loss_t(x_t,y_t) grad_w, grad_b = gradient_t(x_t,y_t) w_t = w_t-learning_rate*grad_w b_t = b_t-learning_rate*grad_b print("progress:", "epoch:", epoch, "loss",l_t) # After training print("estimation of the parameters:", w_t, b_t )
progress: epoch: 0 loss tensor(26.0386) progress: epoch: 1 loss tensor(16.9264) progress: epoch: 2 loss tensor(15.5589) progress: epoch: 3 loss tensor(14.4637) progress: epoch: 4 loss tensor(13.4501) progress: epoch: 5 loss tensor(12.5081) progress: epoch: 6 loss tensor(11.6326) progress: epoch: 7 loss tensor(10.8187) progress: epoch: 8 loss tensor(10.0622) progress: epoch: 9 loss tensor(9.3590) estimation of the parameters: tensor([[ 1.1006], [-1.0205]]) tensor([[0.5421]])
Apache-2.0
Module2/02b_linear_reg.ipynb
GenBill/notebooks
Linear regression with Autograd
# Setting requires_grad=True indicates that we want to compute gradients with
# respect to these Tensors during the backward pass.
w_v = w_init_t.clone().unsqueeze(1)
w_v.requires_grad_(True)
b_v = b_init_t.clone().unsqueeze(1)
b_v.requires_grad_(True)
print("initial values of the parameters:", w_v.data, b_v.data )
initial values of the parameters: tensor([[0.9705], [0.0264]]) tensor([[0.6573]])
Apache-2.0
Module2/02b_linear_reg.ipynb
GenBill/notebooks
An implementation of **(Batch) Gradient Descent** that does not compute the gradient explicitly, using autograd instead.
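For readers new to autograd, a minimal scalar example of the same pattern (build a loss, call `.backward()`, read `.grad`), independent of the notebook's data:

```python
import torch

w = torch.tensor(2.0, requires_grad=True)
x, y = 3.0, 1.0
loss = (w * x - y) ** 2      # loss(w) = (3w - 1)^2
loss.backward()              # populates w.grad
print(w.grad)                # analytic gradient 2*x*(w*x - y) = 6*5 = 30.0
```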
for epoch in range(10): y_pred = x_t.mm(w_v)+b_v loss = (y_pred - y_t).pow(2).sum() # Use autograd to compute the backward pass. This call will compute the # gradient of loss with respect to all Variables with requires_grad=True. # After this call w.grad and b.grad will be tensors holding the gradient # of the loss with respect to w and b respectively. loss.backward() # Update weights using gradient descent. For this step we just want to mutate # the values of w_v and b_v in-place; we don't want to build up a computational # graph for the update steps, so we use the torch.no_grad() context manager # to prevent PyTorch from building a computational graph for the updates with torch.no_grad(): w_v -= learning_rate * w_v.grad b_v -= learning_rate * b_v.grad # Manually zero the gradients after updating weights # otherwise gradients will be acumulated after each .backward() w_v.grad.zero_() b_v.grad.zero_() print("progress:", "epoch:", epoch, "loss",loss.data.item()) # After training print("estimation of the parameters:\n", w_v.data, b_v.data.t() )
progress: epoch: 0 loss 26.03858184814453 progress: epoch: 1 loss 16.926387786865234 progress: epoch: 2 loss 15.558940887451172 progress: epoch: 3 loss 14.46370792388916 progress: epoch: 4 loss 13.450118064880371 progress: epoch: 5 loss 12.508138656616211 progress: epoch: 6 loss 11.63258171081543 progress: epoch: 7 loss 10.818733215332031 progress: epoch: 8 loss 10.062217712402344 progress: epoch: 9 loss 9.358969688415527 estimation of the parameters: tensor([[ 1.1006], [-1.0205]]) tensor([[0.5421]])
Apache-2.0
Module2/02b_linear_reg.ipynb
GenBill/notebooks
Linear regression with a neural network

An implementation of **(Batch) Gradient Descent** using the `nn` package. Here we have a super simple model with only one layer and no activation function!
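A quick way to see what `torch.nn.Linear(2, 1)` brings: it owns a weight of shape (1, 2) and a bias of shape (1,), and computes `x @ weight.T + bias`. A short sketch:

```python
import torch

lin = torch.nn.Linear(2, 1)
print(lin.weight.shape, lin.bias.shape)   # torch.Size([1, 2]) torch.Size([1])

x = torch.randn(5, 2)
out = lin(x)                              # same as x @ lin.weight.t() + lin.bias
print(torch.allclose(out, x @ lin.weight.t() + lin.bias))  # True
print(out.shape)                          # torch.Size([5, 1])
```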
# Use the nn package to define our model as a sequence of layers. nn.Sequential # is a Module which contains other Modules, and applies them in sequence to # produce its output. Each Linear Module computes output from input using a # linear function, and holds internal Variables for its weight and bias. model = torch.nn.Sequential( torch.nn.Linear(2, 1), ) for m in model.children(): m.weight.data = w_init_t.clone().unsqueeze(0) m.bias.data = b_init_t.clone() # The nn package also contains definitions of popular loss functions; in this # case we will use Mean Squared Error (MSE) as our loss function. loss_fn = torch.nn.MSELoss(reduction='sum') # switch to train mode model.train() for epoch in range(10): # Forward pass: compute predicted y by passing x to the model. Module objects # override the __call__ operator so you can call them like functions. When # doing so you pass a Variable of input data to the Module and it produces # a Variable of output data. y_pred = model(x_t) # Note this operation is equivalent to: pred = model.forward(x_v) # Compute and print loss. We pass Variables containing the predicted and true # values of y, and the loss function returns a Variable containing the # loss. loss = loss_fn(y_pred, y_t) # Zero the gradients before running the backward pass. model.zero_grad() # Backward pass: compute gradient of the loss with respect to all the learnable # parameters of the model. Internally, the parameters of each Module are stored # in Variables with requires_grad=True, so this call will compute gradients for # all learnable parameters in the model. loss.backward() # Update the weights using gradient descent. Each parameter is a Tensor, so # we can access its data and gradients like we did before. with torch.no_grad(): for param in model.parameters(): param.data -= learning_rate * param.grad print("progress:", "epoch:", epoch, "loss",loss.data.item()) # After training print("estimation of the parameters:") for param in model.parameters(): print(param)
progress: epoch: 0 loss 26.03858184814453 progress: epoch: 1 loss 16.926387786865234 progress: epoch: 2 loss 15.558940887451172 progress: epoch: 3 loss 14.46370792388916 progress: epoch: 4 loss 13.450118064880371 progress: epoch: 5 loss 12.508138656616211 progress: epoch: 6 loss 11.63258171081543 progress: epoch: 7 loss 10.818733215332031 progress: epoch: 8 loss 10.062217712402344 progress: epoch: 9 loss 9.358969688415527 estimation of the parameters: Parameter containing: tensor([[ 1.1006, -1.0205]], requires_grad=True) Parameter containing: tensor([0.5421], requires_grad=True)
Apache-2.0
Module2/02b_linear_reg.ipynb
GenBill/notebooks
As a last step, we use the `optim` package directly to update the weights and bias.
model = torch.nn.Sequential( torch.nn.Linear(2, 1), ) for m in model.children(): m.weight.data = w_init_t.clone().unsqueeze(0) m.bias.data = b_init_t.clone() loss_fn = torch.nn.MSELoss(reduction='sum') model.train() optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate) for epoch in range(10): y_pred = model(x_t) loss = loss_fn(y_pred, y_t) print("progress:", "epoch:", epoch, "loss",loss.item()) # print("progress:", "epoch:", epoch, "loss",loss) # Zero gradients, perform a backward pass, and update the weights. optimizer.zero_grad() loss.backward() optimizer.step() # After training print("estimation of the parameters:") for param in model.parameters(): print(param)
progress: epoch: 0 loss 385.95172119140625 progress: epoch: 0 loss tensor(385.9517, grad_fn=<MseLossBackward>) progress: epoch: 1 loss 9597.4716796875 progress: epoch: 1 loss tensor(9597.4717, grad_fn=<MseLossBackward>) progress: epoch: 2 loss 595541.875 progress: epoch: 2 loss tensor(595541.8750, grad_fn=<MseLossBackward>) progress: epoch: 3 loss 37207608.0 progress: epoch: 3 loss tensor(37207608., grad_fn=<MseLossBackward>) progress: epoch: 4 loss 2324688896.0 progress: epoch: 4 loss tensor(2.3247e+09, grad_fn=<MseLossBackward>) progress: epoch: 5 loss 145243914240.0 progress: epoch: 5 loss tensor(1.4524e+11, grad_fn=<MseLossBackward>) progress: epoch: 6 loss 9074673451008.0 progress: epoch: 6 loss tensor(9.0747e+12, grad_fn=<MseLossBackward>) progress: epoch: 7 loss 566975210192896.0 progress: epoch: 7 loss tensor(5.6698e+14, grad_fn=<MseLossBackward>) progress: epoch: 8 loss 3.54239775768576e+16 progress: epoch: 8 loss tensor(3.5424e+16, grad_fn=<MseLossBackward>) progress: epoch: 9 loss 2.2132498365037937e+18 progress: epoch: 9 loss tensor(2.2132e+18, grad_fn=<MseLossBackward>) estimation of the parameters: Parameter containing: tensor([[2.2344e+08, 2.3512e+08]], requires_grad=True) Parameter containing: tensor([4.5320e+08], requires_grad=True)
Apache-2.0
Module2/02b_linear_reg.ipynb
GenBill/notebooks
Remark: this problem can be solved in 3 lines of code!
xb_t = torch.cat((x_t, torch.ones(30).unsqueeze(1)), 1)
# print(xb_t)
sol, _ = torch.lstsq(y_t, xb_t)
print(sol[:3])
tensor([[ 2.0000], [-3.0000], [ 1.0000]])
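Note that `torch.lstsq` is deprecated in recent PyTorch releases (and removed in 2.x). On a newer version, roughly the same three lines can be written with `torch.linalg.lstsq`, whose argument order is swapped relative to `torch.lstsq`:

```python
# Equivalent with the newer API (recent PyTorch versions):
xb_t = torch.cat((x_t, torch.ones(x_t.shape[0]).unsqueeze(1)), 1)
sol = torch.linalg.lstsq(xb_t, y_t).solution
print(sol)   # ~ [[2.], [-3.], [1.]]
```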
Apache-2.0
Module2/02b_linear_reg.ipynb
GenBill/notebooks
Exercise: Play with the code

Change the number of samples from 30 to 300. What happens? How can you correct it?
x = random((300,2)) y = np.dot(x, [2., -3.]) + 1. x_t = torch.from_numpy(x).type(dtype) y_t = torch.from_numpy(y).type(dtype).unsqueeze(1) model = torch.nn.Sequential( torch.nn.Linear(2, 1), ) for m in model.children(): m.weight.data = w_init_t.clone().unsqueeze(0) m.bias.data = b_init_t.clone() loss_fn = torch.nn.MSELoss(reduction = 'mean') model.train() optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate) for epoch in range(10000): y_pred = model(x_t) loss = loss_fn(y_pred, y_t) if epoch%500==499: print("progress:", "epoch:", epoch+1, "loss",loss.item()) # Zero gradients, perform a backward pass, and update the weights. optimizer.zero_grad() loss.backward() optimizer.step() # After training print("estimation of the parameters:") for param in model.parameters(): print(param)
progress: epoch: 499 loss 0.1583678424358368 progress: epoch: 999 loss 0.03177538886666298 progress: epoch: 1499 loss 0.006897071376442909 progress: epoch: 1999 loss 0.0016702644061297178 progress: epoch: 2499 loss 0.00045764277456328273 progress: epoch: 2999 loss 0.0001400149194523692 progress: epoch: 3499 loss 4.638753307517618e-05 progress: epoch: 3999 loss 1.614570828678552e-05 progress: epoch: 4499 loss 5.776500529464101e-06 progress: epoch: 4999 loss 2.09857080335496e-06 progress: epoch: 5499 loss 7.687903575970267e-07 progress: epoch: 5999 loss 2.8404440399754094e-07 progress: epoch: 6499 loss 1.0585888077230265e-07 progress: epoch: 6999 loss 3.994070496560198e-08 progress: epoch: 7499 loss 1.532848514784746e-08 progress: epoch: 7999 loss 5.943735281732643e-09 progress: epoch: 8499 loss 2.0236934350492675e-09 progress: epoch: 8999 loss 1.1486694928564134e-09 progress: epoch: 9499 loss 1.1486694928564134e-09 progress: epoch: 9999 loss 1.1486694928564134e-09 estimation of the parameters: Parameter containing: tensor([[ 2.0001, -2.9999]], requires_grad=True) Parameter containing: tensor([0.9999], requires_grad=True)
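The fix above swaps `reduction='sum'` for `reduction='mean'`. With a summed loss, the gradient magnitude grows roughly linearly with the number of samples, so a learning rate tuned for 30 points can blow up at 300; averaging keeps the gradient scale independent of the dataset size (lowering the learning rate by the same factor would also work). A small sketch of the scale difference:

```python
import torch

for n in (30, 300):
    x = torch.rand(n, 2)
    y = (x @ torch.tensor([2., -3.]) + 1.).unsqueeze(1)
    model = torch.nn.Linear(2, 1)

    for reduction in ('sum', 'mean'):
        model.zero_grad()
        loss = torch.nn.MSELoss(reduction=reduction)(model(x), y)
        loss.backward()
        print(n, reduction, model.weight.grad.norm().item())
# the 'sum' gradient norm grows roughly 10x from n=30 to n=300; 'mean' stays flat
```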
Apache-2.0
Module2/02b_linear_reg.ipynb
GenBill/notebooks
GHCN V2 Temperatures ANOM (C) CR 1200KM 1880-present

GLOBAL Temperature Anomalies in 0.01 C, base period: 1951-1980

http://climatecode.org/
import os import git if not os.path.exists('ccc-gistemp'): git.Git().clone('https://github.com/ClimateCodeFoundation/ccc-gistemp.git') if not os.path.exists('madqc'): git.Git().clone('https://github.com/ClimateCodeFoundation/madqc.git')
_____no_output_____
CC0-1.0
notebooks/gistemp.ipynb
ocefpaf/bioinfo
It seems that http://data.giss.nasa.gov/gistemp/sources_v3/GISTEMPv3_sources.tar.gz and http://data.giss.nasa.gov/pub/gistemp/SBBX.ERSST.gz are down, so let's use a local copy instead.
!mkdir -p ccc-gistemp/input !cp data/GISTEMPv3_sources.tar.gz data/SBBX.ERSST.gz ccc-gistemp/input %cd ccc-gistemp/
/home/filipe/Dropbox/Meetings/2018-CicloPalestrasComputacaoCientifica/notebooks/ccc-gistemp
CC0-1.0
notebooks/gistemp.ipynb
ocefpaf/bioinfo
We don't really need `pypy` for the fetch phase, but the code is Python 2 and the notebook is Python 3, so this is just a lazy way to call py2k code from a py3k notebook ;-p

PS: we are also using the International Surface Temperature Initiative (ISTI) data.
!pypy tool/fetch.py isti
input/isti.v1.tar.gz already exists. ... input/isti.merged.inv already exists. ... input/isti.merged.dat already exists.
CC0-1.0
notebooks/gistemp.ipynb
ocefpaf/bioinfo
QC the ISTI data.
!../madqc/mad.py --progress input/isti.merged.dat
100% ZIXLT831324 TAVG 1960 180
CC0-1.0
notebooks/gistemp.ipynb
ocefpaf/bioinfo
We need to copy the ISTI data into the `input` directory.
!cp isti.merged.qc.dat input/isti.merged.qc.dat !cp input/isti.merged.inv input/isti.merged.qc.inv
_____no_output_____
CC0-1.0
notebooks/gistemp.ipynb
ocefpaf/bioinfo
Here is where `pypy` is really needed: this step takes ~35 minutes on vanilla `python` but only ~100 seconds on `pypy`.
!pypy tool/run.py -p 'data_sources=isti.merged.qc.dat;element=TAVG' -s 0-1,3-5
input/ghcnm.tavg.latest.qca.tar.gz already exists. ... input/ghcnm.tavg.qca.dat already exists. input/GISTEMPv3_sources.tar.gz already exists. ... input/oisstv2_mod4.clim.gz already exists. ... input/sumofday.tbl already exists. ... input/v3.inv already exists. ... input/ushcn3.tbl already exists. ... input/mcdw.tbl already exists. ... input/Ts.strange.v3.list.IN_full already exists. ... input/antarc2.list already exists. ... input/antarc3.list already exists. ... input/antarc1.list already exists. ... input/antarc1.txt already exists. ... input/antarc2.txt already exists. ... input/t_hohenpeissenberg_200306.txt_as_received_July17_2003 already exists. ... input/antarc3.txt already exists. input/SBBX.ERSST.gz already exists. ... input/SBBX.ERSST already exists. ====> STEPS 0, 1, 3, 4, 5 ==== No more recent sea-surface data files. Load ISTI.MERGED.QC.DAT records (Reading average temperature) Step 0: closing output file. Step 1: closing output file. Region (+64/+90 S/N -180/-090 W/E): 0 empty cells. Region (+64/+90 S/N -090/+000 W/E): 0 empty cells. Region (+64/+90 S/N +000/+090 W/E): 0 empty cells. Region (+64/+90 S/N +090/+180 W/E): 0 empty cells. Region (+44/+64 S/N -180/-135 W/E): 0 empty cells. Region (+44/+64 S/N -135/-090 W/E): 0 empty cells. Region (+44/+64 S/N -090/-045 W/E): 0 empty cells. Region (+44/+64 S/N -045/+000 W/E): 0 empty cells. Region (+44/+64 S/N +000/+045 W/E): 0 empty cells. Region (+44/+64 S/N +045/+090 W/E): 0 empty cells. Region (+44/+64 S/N +090/+135 W/E): 0 empty cells. Region (+44/+64 S/N +135/+180 W/E): 0 empty cells. Region (+24/+44 S/N -180/-150 W/E): 31 empty cells. Region (+24/+44 S/N -150/-120 W/E): 2 empty cells. Region (+24/+44 S/N -120/-090 W/E): 0 empty cells. Region (+24/+44 S/N -090/-060 W/E): 0 empty cells. Region (+24/+44 S/N -060/-030 W/E): 10 empty cells. Region (+24/+44 S/N -030/+000 W/E): 0 empty cells. Region (+24/+44 S/N +000/+030 W/E): 0 empty cells. Region (+24/+44 S/N +030/+060 W/E): 0 empty cells. Region (+24/+44 S/N +060/+090 W/E): 0 empty cells. Region (+24/+44 S/N +090/+120 W/E): 0 empty cells. Region (+24/+44 S/N +120/+150 W/E): 0 empty cells. Region (+24/+44 S/N +150/+180 W/E): 26 empty cells. Region (+00/+24 S/N -180/-158 W/E): 1 empty cell. Region (+00/+24 S/N -158/-135 W/E): 40 empty cells. Region (+00/+24 S/N -135/-112 W/E): 80 empty cells. Region (+00/+24 S/N -112/-090 W/E): 19 empty cells. Region (+00/+24 S/N -090/-068 W/E): 0 empty cells. Region (+00/+24 S/N -068/-045 W/E): 9 empty cells. Region (+00/+24 S/N -045/-022 W/E): 29 empty cells. Region (+00/+24 S/N -022/+000 W/E): 1 empty cell. Region (+00/+24 S/N +000/+022 W/E): 0 empty cells. Region (+00/+24 S/N +022/+045 W/E): 0 empty cells. Region (+00/+24 S/N +045/+068 W/E): 3 empty cells. Region (+00/+24 S/N +068/+090 W/E): 0 empty cells. Region (+00/+24 S/N +090/+112 W/E): 0 empty cells. Region (+00/+24 S/N +112/+135 W/E): 0 empty cells. Region (+00/+24 S/N +135/+158 W/E): 0 empty cells. Region (+00/+24 S/N +158/+180 W/E): 2 empty cells. Region (-24/-00 S/N -180/-158 W/E): 0 empty cells. Region (-24/-00 S/N -158/-135 W/E): 0 empty cells. Region (-24/-00 S/N -135/-112 W/E): 55 empty cells. Region (-24/-00 S/N -112/-090 W/E): 67 empty cells. Region (-24/-00 S/N -090/-068 W/E): 7 empty cells. Region (-24/-00 S/N -068/-045 W/E): 0 empty cells. Region (-24/-00 S/N -045/-022 W/E): 0 empty cells. Region (-24/-00 S/N -022/+000 W/E): 2 empty cells. Region (-24/-00 S/N +000/+022 W/E): 0 empty cells. Region (-24/-00 S/N +022/+045 W/E): 0 empty cells. 
Region (-24/-00 S/N +045/+068 W/E): 0 empty cells. Region (-24/-00 S/N +068/+090 W/E): 29 empty cells. Region (-24/-00 S/N +090/+112 W/E): 1 empty cell. Region (-24/-00 S/N +112/+135 W/E): 0 empty cells. Region (-24/-00 S/N +135/+158 W/E): 0 empty cells. Region (-24/-00 S/N +158/+180 W/E): 0 empty cells. Region (-44/-24 S/N -180/-150 W/E): 25 empty cells. Region (-44/-24 S/N -150/-120 W/E): 37 empty cells. Region (-44/-24 S/N -120/-090 W/E): 48 empty cells. Region (-44/-24 S/N -090/-060 W/E): 2 empty cells. Region (-44/-24 S/N -060/-030 W/E): 21 empty cells. Region (-44/-24 S/N -030/+000 W/E): 18 empty cells. Region (-44/-24 S/N +000/+030 W/E): 15 empty cells. Region (-44/-24 S/N +030/+060 W/E): 5 empty cells. Region (-44/-24 S/N +060/+090 W/E): 21 empty cells. Region (-44/-24 S/N +090/+120 W/E): 43 empty cells. Region (-44/-24 S/N +120/+150 W/E): 0 empty cells. Region (-44/-24 S/N +150/+180 W/E): 0 empty cells. Region (-64/-44 S/N -180/-135 W/E): 74 empty cells. Region (-64/-44 S/N -135/-090 W/E): 72 empty cells. Region (-64/-44 S/N -090/-045 W/E): 0 empty cells. Region (-64/-44 S/N -045/+000 W/E): 20 empty cells. Region (-64/-44 S/N +000/+045 W/E): 44 empty cells. Region (-64/-44 S/N +045/+090 W/E): 5 empty cells. Region (-64/-44 S/N +090/+135 W/E): 60 empty cells. Region (-64/-44 S/N +135/+180 W/E): 1 empty cell. Region (-90/-64 S/N -180/-090 W/E): 4 empty cells. Region (-90/-64 S/N -090/+000 W/E): 0 empty cells. Region (-90/-64 S/N +000/+090 W/E): 0 empty cells. Region (-90/-64 S/N +090/+180 W/E): 0 empty cells. Step3: closing output file Step4: closing output file WARNING: Bad mix of land and ocean data. Land range from 1880-01 to 2018-02; Ocean range from 1880-01 to 2015-09. Step 5: Closing box file: result/landBX.Ts.GHCN.CL.PA.1200 Step 5: Closing box file: result/oceanBX.Ts.ERSST.CL.PA Step 5: Closing box file: result/mixedBX.Ts.ERSST.GHCN.CL.PA.1200 ... running vischeck See result/google-chart.url ====> Timing Summary ==== Run took 216.1 seconds
CC0-1.0
notebooks/gistemp.ipynb
ocefpaf/bioinfo
Python `gistemp` saves the results in the same format as the Fortran program but it ships with `gistemp2csv.py` to make it easier to read the data with `pandas`.
!pypy tool/gistemp2csv.py result/*.txt

import pandas as pd

df = pd.read_csv(
    'result/landGLB.Ts.GHCN.CL.PA.csv',
    skiprows=3,
    index_col=0,
    na_values=('*****', '****'),
)
_____no_output_____
CC0-1.0
notebooks/gistemp.ipynb
ocefpaf/bioinfo
Let's use `sklearn` to compute the full trend...
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score

reg0 = linear_model.LinearRegression()
series0 = df['J-D'].dropna()
y = series0.values
X = series0.index.values[:, None]

reg0.fit(X, y)
y_pred0 = reg0.predict(X)

# Note: despite the variable names, R2_0 holds the mean squared error
# and var0 holds the R² score (the plot below uses var0 as R²).
R2_0 = mean_squared_error(y, y_pred0)
var0 = r2_score(y, y_pred0)
_____no_output_____
CC0-1.0
notebooks/gistemp.ipynb
ocefpaf/bioinfo
and the trend over the past 30 years.
reg1 = linear_model.LinearRegression()
series1 = df['J-D'].dropna().iloc[-30:]
y = series1.values
X = series1.index.values[:, None]

reg1.fit(X, y)
y_pred1 = reg1.predict(X)

R2_1 = mean_squared_error(y[-30:], y_pred1)
var1 = r2_score(y[-30:], y_pred1)

%matplotlib inline
ax = df.plot.line(y='J-D', figsize=(9, 9), legend=None)
ax.plot(series0.index, y_pred0, 'r--')
ax.plot(series1.index, y_pred1, 'r')
ax.set_xlim([1879, 2018])

leg = f"""Trend in ℃/century (R²)
Full: {reg0.coef_[0]*100:0.2f} ({var0:0.2f})
30-year: {reg1.coef_[0]*100:0.2f} ({var1:0.2f})
"""
ax.text(0.10, 0.75, leg, transform=ax.transAxes);
_____no_output_____
CC0-1.0
notebooks/gistemp.ipynb
ocefpaf/bioinfo
Inheriting from Unit

Abstract attributes and methods

![](./Unit_UML.png "Unit UML Diagram")

**A Unit subclass has class attributes that dictate how an instance is initialized:**

* `_BM` : dict[str, float] Bare module factors for each purchase cost item.
* `_units` : [dict] Units of measure for the `design_results` items.
* `_N_ins`=1 : [int] Expected number of input streams.
* `_N_outs`=2 : [int] Expected number of output streams.
* `_ins_size_is_fixed`=True : [bool] Whether the number of streams in ins is fixed.
* `_outs_size_is_fixed`=True : [bool] Whether the number of streams in outs is fixed.
* `_N_heat_utilities`=0 : [int] Number of heat utility objects in the `heat_utilities` tuple.
* `_stream_link_options`=None : [StreamLinkOptions] Options for linking streams.
* `auxiliary_unit_names`=() : tuple[str] Names of attributes that are auxiliary units.
* `_graphics` : [biosteam Graphics] A Graphics object for diagram representation. Defaults to a box diagram.
* `line` : [str] Label for the unit operation in a diagram. Defaults to the class name.

**Abstract methods are used to set up stream conditions, run heat and mass balances, find design requirements, and cost the unit:**

* `_setup()` : Called before System convergence to initialize constant data and set up stream conditions.
* `_run()` : Called during System convergence to specify `outs` streams.
* `_design()` : Called after System convergence to find design requirements.
* `_cost()` : Called after `_design` to find cost requirements.

**These abstract methods will rely on the following instance attributes:**

* `ins` : Ins[Stream] Input streams.
* `outs` : Outs[Stream] Output streams.
* `power_utility` : [PowerUtility] Can find electricity rate requirement.
* `heat_utilities` : tuple[HeatUtility] Can find cooling and heating requirements.
* `design_results` : [dict] All design requirements.
* `purchase_costs` : [dict] Itemized purchase costs.
* `thermo` : [Thermo] The thermodynamic property package used by the unit.

Subclass example

The following example depicts inheritance from Unit by creating a new Boiler class; a minimal skeleton is sketched first, followed by the full Boiler code.
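A minimal sketch of a subclass that only overrides `_run` and copies its single inlet to its single outlet, using only stream attributes that also appear in the Boiler example below. It assumes (as simple BioSTEAM units suggest) that `_setup`, `_design`, and `_cost` may be omitted when there is nothing to initialize, size, or cost:

```python
import biosteam as bst

class PassThrough(bst.Unit):
    """Toy unit whose outlet is an exact copy of its inlet (no design, no cost)."""
    _N_ins = 1
    _N_outs = 1

    def _run(self):
        feed, = self.ins
        product, = self.outs
        # Copy flow rates and state from the inlet to the outlet
        product.mol[:] = feed.mol
        product.T = feed.T
        product.P = feed.P
        product.phase = feed.phase
```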
import biosteam as bst from math import ceil class Boiler(bst.Unit): """ Create a Boiler object that partially boils the feed. Parameters ---------- ins : stream Inlet fluid. outs : stream sequence * [0] vapor product * [1] liquid product V : float Molar vapor fraction. P : float Operating pressure [Pa]. """ # Note that the documentation does not include `ID` or `thermo` in the parameters. # This is OK, and most subclasses in BioSTEAM are documented this way too. # Documentation for all unit operations should include the inlet and outlet streams # listed by index. If there is only one stream in the inlets (or outlets), there is no # need to list out by index. The types for the `ins` and `outs` should be either # `stream sequence` for multiple streams, or `stream` for a single stream. # Any additional arguments to the unit should also be listed (e.g. V, and P). _N_ins = 1 _N_outs = 2 _N_heat_utilities = 1 _BM = {'Evaporators': 2.45} _units = {'Area': 'm^2'} def __init__(self, ID='', ins=None, outs=(), thermo=None, *, V, P): bst.Unit.__init__(self, ID, ins, outs, thermo) # Initialize MultiStream object to perform vapor-liquid equilibrium later # NOTE: ID is None to not register it in the flowsheet self._multistream = bst.MultiStream(None, thermo=self.thermo) self.V = V #: Molar vapor fraction. self.P = P #: Operating pressure [Pa]. def _setup(self): gas, liq = self.outs # Initialize top stream as a gas gas.phase = 'g' # Initialize bottom stream as a liquid liq.phase = 'l' def _run(self): feed = self.ins[0] gas, liq = self.outs # Perform vapor-liquid equilibrium ms = self._multistream ms.imol['l'] = feed.mol ms.vle(V=self.V, P=self.P) # Update output streams gas.mol[:] = ms.imol['g'] liq.mol[:] = ms.imol['l'] gas.T = liq.T = ms.T gas.P = liq.P = ms.P # Reset flow to prevent accumulation in multiple simulations ms.empty() def _design(self): # Calculate heat utility requirement (please read docs for HeatUtility objects) T_operation = self._multistream.T duty = self.H_out - self.H_in if duty < 0: raise RuntimeError(f'{repr(self)} is cooling.') hu = self.heat_utilities[0] hu(duty, T_operation) # Temperature of utility at entrance T_utility = hu.inlet_utility_stream.T # Temeperature gradient dT = T_utility - T_operation # Heat transfer coefficient kJ/(hr*m2*K) U = 8176.699 # Area requirement (m^2) A = duty/(U*dT) # Maximum area per unit A_max = 743.224 # Number of units N = ceil(A/A_max) # Design requirements are stored here self.design_results['Area'] = A/N self.design_results['N'] = N def _cost(self): A = self.design_results['Area'] N = self.design_results['N'] # Long-tube vertical boiler cost correlation from # "Product process and design". Warren et. al. (2016) Table 22.32, pg 592 purchase_cost = N*bst.CE*3.086*A**0.55 # Itemized purchase costs are stored here self.purchase_costs['Boilers'] = purchase_cost
_____no_output_____
MIT
docs/tutorial/Inheriting_from_Unit.ipynb
sarangbhagwat/biosteam
Simulation test
import biosteam as bst bst.settings.set_thermo(['Water']) water = bst.Stream('water', Water=300) B1 = Boiler('B1', ins=water, outs=('gas', 'liq'), V=0.5, P=101325) B1.diagram() B1.show() B1.simulate() B1.show() B1.results()
_____no_output_____
MIT
docs/tutorial/Inheriting_from_Unit.ipynb
sarangbhagwat/biosteam
Graphviz attributes All [graphviz](https://graphviz.readthedocs.io/en/stable/manual.html) attributes for generating a diagram are stored in `_graphics` as a Graphics object. One Graphics object is generated for each Unit subclass:
graphics = Boiler._graphics edge_in = graphics.edge_in edge_out = graphics.edge_out node = graphics.node # Attributes correspond to each inlet stream respectively # For example: Attributes for B1.ins[0] would correspond to edge_in[0] edge_in # Attributes correspond to each outlet stream respectively # For example: Attributes for B1.outs[0] would correspond to edge_out[0] edge_out node # The node represents the actual unit
_____no_output_____
MIT
docs/tutorial/Inheriting_from_Unit.ipynb
sarangbhagwat/biosteam
These attributes can be changed to the user's liking:
edge_out[0]['tailport'] = 'n' edge_out[1]['tailport'] = 's' node['width'] = '1' node['height'] = '1.2' B1.diagram()
_____no_output_____
MIT
docs/tutorial/Inheriting_from_Unit.ipynb
sarangbhagwat/biosteam
It is also possible to dynamically adjust node and edge attributes by setting the `tailor_node_to_unit` attribute:
def tailor_node_to_unit(node, unit): feed = unit.ins[0] if not feed.F_mol: node['name'] += '\n-empty-' graphics.tailor_node_to_unit = tailor_node_to_unit B1.diagram() B1.ins[0].empty() B1.diagram()
_____no_output_____
MIT
docs/tutorial/Inheriting_from_Unit.ipynb
sarangbhagwat/biosteam
Altitude
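The cells below repeatedly run the same three-state Kalman filter (state = [altitude, vertical velocity, vertical acceleration], measurements = [altitude, vertical acceleration]) inside hyperopt `fmin` tuning loops. A minimal sketch of one predict/update step with the same matrix shapes, assuming the 0.05 s time step used inside the tuned filter functions:

```python
import numpy as np

dt = 0.05
A = np.array([[1.0, dt, 0.5 * dt**2],   # constant-acceleration state transition
              [0.0, 1.0, dt],
              [0.0, 0.0, 1.0]])
H = np.array([[1.0, 0.0, 0.0],          # we observe altitude...
              [0.0, 0.0, 1.0]])         # ...and vertical acceleration
I = np.identity(3)

def kalman_step(x_hat, P, z, Q, R):
    """One predict/update cycle; z = [[altitude], [vertical_acceleration]]."""
    # Predict
    x_hat_minus = A @ x_hat
    P_minus = A @ P @ A.T + Q
    # Update
    S = H @ P_minus @ H.T + R
    K = P_minus @ H.T @ np.linalg.inv(S)             # Kalman gain
    x_hat = x_hat_minus + K @ (z - H @ x_hat_minus)
    P = (I - K @ H) @ P_minus
    return x_hat, P

# Example usage with covariances similar to those used as starting points below
x_hat = np.zeros((3, 1))
P = np.identity(3)
Q = 0.001 * np.identity(3)
R = np.diag([0.25, 0.75])
x_hat, P = kalman_step(x_hat, P, np.array([[10.0], [0.5]]), Q, R)
print(x_hat.ravel())   # filtered [altitude, velocity, acceleration]
```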
q = 0.001 A = np.array([[1.0, 0.1, 0.005], [0, 1.0, 0.1], [0, 0, 1]]) H = np.array([[1.0, 0.0, 0.0],[ 0.0, 0.0, 1.0]]) P = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) # R = np.array([[0.5, 0.0], [0.0, 0.0012]]) # Q = np.array([[q, 0.0, 0.0], [0.0, q, 0.0], [0.0, 0.0, q]]) I = np.identity(3) x_hat = np.array([[0.0], [0.0], [0.0]]) Y = np.array([[0.0], [0.0]]) def kalman_update(param): r1, r2, q1 = param['r1'], param['r2'], param['q1'] R = np.array([[r1, 0.0], [0.0, r2]]) Q = np.array([[q1, 0.0, 0.0], [0.0, q1, 0.0], [0.0, 0.0, q1]]) A = np.array([[1.0, 0.05, 0.00125], [0, 1.0, 0.05], [0, 0, 1]]) H = np.array([[1.0, 0.0, 0.0],[ 0.0, 0.0, 1.0]]) P = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) I = np.identity(3) x_hat = np.array([[0.0], [0.0], [0.0]]) Y = np.array([[0.0], [0.0]]) new_altitude = [] new_acceleration = [] new_velocity = [] for altitude, az in zip(noisy_df['Altitude'], noisy_df['Vertical_acceleration']): Z = np.array([[altitude], [az]]) x_hat_minus = np.dot(A, x_hat) P_minus = np.dot(np.dot(A, P), np.transpose(A)) + Q K = np.dot(np.dot(P_minus, np.transpose(H)), np.linalg.inv((np.dot(np.dot(H, P_minus), np.transpose(H)) + R))) Y = Z - np.dot(H, x_hat_minus) x_hat = x_hat_minus + np.dot(K, Y) P = np.dot((I - np.dot(K, H)), P_minus) Y = Z - np.dot(H, x_hat_minus) new_altitude.append(float(x_hat[0])) new_velocity.append(float(x_hat[1])) new_acceleration.append(float(x_hat[2])) return new_altitude def objective_function(param): r1, r2, q1 = param['r1'], param['r2'], param['q1'] R = np.array([[r1, 0.0], [0.0, r2]]) Q = np.array([[q1, 0.0, 0.0], [0.0, q1, 0.0], [0.0, 0.0, q1]]) A = np.array([[1.0, 0.05, 0.00125], [0, 1.0, 0.05], [0, 0, 1]]) H = np.array([[1.0, 0.0, 0.0],[ 0.0, 0.0, 1.0]]) P = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) I = np.identity(3) x_hat = np.array([[0.0], [0.0], [0.0]]) Y = np.array([[0.0], [0.0]]) new_altitude = [] new_acceleration = [] new_velocity = [] for altitude, az in zip(noisy_df['Altitude'], noisy_df['Vertical_acceleration']): Z = np.array([[altitude], [az]]) x_hat_minus = np.dot(A, x_hat) P_minus = np.dot(np.dot(A, P), np.transpose(A)) + Q K = np.dot(np.dot(P_minus, np.transpose(H)), np.linalg.inv((np.dot(np.dot(H, P_minus), np.transpose(H)) + R))) Y = Z - np.dot(H, x_hat_minus) x_hat = x_hat_minus + np.dot(K, Y) P = np.dot((I - np.dot(K, H)), P_minus) Y = Z - np.dot(H, x_hat_minus) new_altitude.append(float(x_hat[0])) new_velocity.append(float(x_hat[1])) new_acceleration.append(float(x_hat[2])) return mean_squared_error(df['Altitude'], new_altitude) # space = { # "r1": hp.choice("r1", np.arange(0.01, 90, 0.005)), # "r2": hp.choice("r2", np.arange(0.01, 90, 0.005)), # "q1": hp.choice("q1", np.arange(0.0001, 0.0009, 0.0001)) # } len(np.arange(0.00001, 0.09, 0.00001)) space = { "r1": hp.choice("r1", np.arange(0.001, 90, 0.001)), "r2": hp.choice("r2", np.arange(0.001, 90, 0.001)), "q1": hp.choice("q1", np.arange(0.00001, 0.09, 0.00001)) } # Initialize trials object trials = Trials() best = fmin(fn=objective_function, space = space, algo=tpe.suggest, max_evals=100, trials=trials ) print(best) # -> {'a': 1, 'c2': 0.01420615366247227} print(space_eval(space, best)) # -> ('case 2', 0.01420615366247227} d1 = space_eval(space, best) objective_function(d1) %%timeit objective_function({'q1': 0.06626, 'r1': 0.25, 'r2': 0.75}) objective_function({'q1': 0.06626, 'r1': 0.25, 'r2': 0.75}) y = kalman_update(d1) current = kalman_update({'q1': 0.06626, 'r1': 0.25, 'r2': 0.75}) plt.figure(figsize=(20, 10)) 
plt.plot(noisy_df.Time, df['Altitude'], linewidth=2, color="r", label="Actual") plt.plot(noisy_df.Time, current, linewidth=2, color="g", label="ESP32") plt.plot(noisy_df.Time, noisy_df['Altitude'], linewidth=2, color="y", label="Noisy") plt.plot(noisy_df.Time, y, linewidth=2, color="b", label="Predicted") plt.legend() plt.show() def kalman_update_return_velocity(param): r1, r2, q1 = param['r1'], param['r2'], param['q1'] R = np.array([[r1, 0.0], [0.0, r2]]) Q = np.array([[q1, 0.0, 0.0], [0.0, q1, 0.0], [0.0, 0.0, q1]]) A = np.array([[1.0, 0.05, 0.00125], [0, 1.0, 0.05], [0, 0, 1]]) H = np.array([[1.0, 0.0, 0.0],[ 0.0, 0.0, 1.0]]) P = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) I = np.identity(3) x_hat = np.array([[0.0], [0.0], [0.0]]) Y = np.array([[0.0], [0.0]]) new_altitude = [] new_acceleration = [] new_velocity = [] for altitude, az in zip(noisy_df['Altitude'], noisy_df['Vertical_acceleration']): Z = np.array([[altitude], [az]]) x_hat_minus = np.dot(A, x_hat) P_minus = np.dot(np.dot(A, P), np.transpose(A)) + Q K = np.dot(np.dot(P_minus, np.transpose(H)), np.linalg.inv((np.dot(np.dot(H, P_minus), np.transpose(H)) + R))) Y = Z - np.dot(H, x_hat_minus) x_hat = x_hat_minus + np.dot(K, Y) P = np.dot((I - np.dot(K, H)), P_minus) Y = Z - np.dot(H, x_hat_minus) new_altitude.append(float(x_hat[0])) new_velocity.append(float(x_hat[1])) new_acceleration.append(float(x_hat[2])) return new_velocity def objective_function(param): r1, r2, q1 = param['r1'], param['r2'], param['q1'] R = np.array([[r1, 0.0], [0.0, r2]]) Q = np.array([[q1, 0.0, 0.0], [0.0, q1, 0.0], [0.0, 0.0, q1]]) A = np.array([[1.0, 0.05, 0.00125], [0, 1.0, 0.05], [0, 0, 1]]) H = np.array([[1.0, 0.0, 0.0],[ 0.0, 0.0, 1.0]]) P = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) I = np.identity(3) x_hat = np.array([[0.0], [0.0], [0.0]]) Y = np.array([[0.0], [0.0]]) new_altitude = [] new_acceleration = [] new_velocity = [] for altitude, az in zip(noisy_df['Altitude'], noisy_df['Vertical_acceleration']): Z = np.array([[altitude], [az]]) x_hat_minus = np.dot(A, x_hat) P_minus = np.dot(np.dot(A, P), np.transpose(A)) + Q K = np.dot(np.dot(P_minus, np.transpose(H)), np.linalg.inv((np.dot(np.dot(H, P_minus), np.transpose(H)) + R))) Y = Z - np.dot(H, x_hat_minus) x_hat = x_hat_minus + np.dot(K, Y) P = np.dot((I - np.dot(K, H)), P_minus) Y = Z - np.dot(H, x_hat_minus) new_altitude.append(float(x_hat[0])) new_velocity.append(float(x_hat[1])) new_acceleration.append(float(x_hat[2])) return mean_squared_error(df['Vertical_velocity'], new_velocity) space = { "r1": hp.choice("r1", np.arange(0.001, 90, 0.001)), "r2": hp.choice("r2", np.arange(0.001, 90, 0.001)), "q1": hp.choice("q1", np.arange(0.00001, 0.09, 0.00001)) } # Initialize trials object trials = Trials() best = fmin(fn=objective_function, space = space, algo=tpe.suggest, max_evals=100, trials=trials ) print(best) print(space_eval(space, best)) d2 = space_eval(space, best) objective_function(d2) y = kalman_update_return_velocity(d2) current = kalman_update_return_velocity({'q1': 0.0013, 'r1': 0.25, 'r2': 0.65}) previous = kalman_update_return_velocity({'q1': 0.08519, 'r1': 4.719, 'r2': 56.443}) plt.figure(figsize=(20, 10)) plt.plot(noisy_df.Time, df['Vertical_velocity'], linewidth=2, color="r", label="Actual") plt.plot(noisy_df.Time, current, linewidth=2, color="g", label="ESP32") plt.plot(noisy_df.Time, previous, linewidth=2, color="c", label="With previous data") plt.plot(noisy_df.Time, noisy_df['Vertical_velocity'], linewidth=2, color="y", label="Noisy") 
plt.plot(noisy_df.Time, y, linewidth=2, color="b", label="Predicted") plt.legend() plt.show()
_____no_output_____
MIT
Src/Notebooks/oprimizeValues.ipynb
nakujaproject/MPUdata
import os import datetime import numpy as np import pandas as pd import matplotlib.pyplot as plt import tensorflow as tf !pip install tensorflow-addons import tensorflow_addons as tfa from sklearn.model_selection import KFold, train_test_split !git clone https://github.com/naufalhisyam/TurbidityPrediction-thesis.git os.chdir('/content/TurbidityPrediction-thesis') images = pd.read_csv(r'./Datasets/0degree_lowrange/0degInfo.csv') #load dataset info train_df, test_df = train_test_split(images, train_size=0.9, shuffle=True, random_state=1) Y = train_df[['Turbidity']] VALIDATION_R2 = [] VALIDATION_LOSS = [] VALIDATION_MSE = [] VALIDATION_MAE = [] name = 'ResNet_0deg_withTL' save_dir = f'saved_models/{name}' if not os.path.exists(save_dir): os.makedirs(save_dir) def get_model(): #Create model base_model = tf.keras.applications.ResNet50(include_top=False, weights='imagenet', input_shape=(224, 224, 3), pooling='avg') out = base_model.output prediction = tf.keras.layers.Dense(1, activation="linear")(out) model = tf.keras.Model(inputs = base_model.input, outputs = prediction) #Compile the model return model def get_model_name(k): return 'resnet_'+str(k)+'.h5' tf.test.gpu_device_name() train_generator = tf.keras.preprocessing.image.ImageDataGenerator( horizontal_flip=True ) test_generator = tf.keras.preprocessing.image.ImageDataGenerator( horizontal_flip=True ) kf = KFold(n_splits = 5) fold_var = 1 for train_index, val_index in kf.split(np.zeros(Y.shape[0]),Y): training_data = train_df.iloc[train_index] validation_data = train_df.iloc[val_index] train_images = train_generator.flow_from_dataframe(training_data, x_col = "Filepath", y_col = "Turbidity", target_size=(224, 224), color_mode='rgb', class_mode = "raw", shuffle = True) val_images = train_generator.flow_from_dataframe(validation_data, x_col = "Filepath", y_col = "Turbidity", target_size=(224, 224), color_mode='rgb', class_mode = "raw", shuffle = True) # CREATE NEW MODEL model = get_model() # COMPILE NEW MODEL opt = tf.keras.optimizers.Adam(learning_rate=1e-4, decay=1e-6) model.compile(loss=tf.keras.losses.Huber(), optimizer=opt, metrics=['mae','mse', tfa.metrics.RSquare(name="R2")]) # CREATE CALLBACKS checkpoint_filepath = f'{save_dir}/{get_model_name(fold_var)}' checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min') callbacks_list = [checkpoint] # There can be other callbacks, but just showing one because it involves the model name # This saves the best model # FIT THE MODEL history = model.fit(train_images, epochs=100, callbacks=callbacks_list, validation_data=val_images) # LOAD BEST MODEL to evaluate the performance of the model model.load_weights(f"{save_dir}/resnet_"+str(fold_var)+".h5") results = model.evaluate(val_images) results = dict(zip(model.metrics_names,results)) VALIDATION_R2.append(results['R2']) VALIDATION_MAE.append(results['mae']) VALIDATION_MSE.append(results['mse']) VALIDATION_LOSS.append(results['loss']) tf.keras.backend.clear_session() fold_var += 1 train_images = train_generator.flow_from_dataframe( dataframe=train_df, x_col='Filepath', y_col='Turbidity', target_size=(224, 224), color_mode='rgb', class_mode='raw', shuffle=False, ) test_images = test_generator.flow_from_dataframe( dataframe=test_df, x_col='Filepath', y_col='Turbidity', target_size=(224, 224), color_mode='rgb', class_mode='raw', shuffle=False ) min_fold = min(range(len(VALIDATION_LOSS)), key=VALIDATION_LOSS.__getitem__) + 1 model = get_model() 
model.load_weights(f"{save_dir}/resnet_"+str(min_fold)+".h5") opt = tf.keras.optimizers.Adam(learning_rate=1e-3, decay=1e-6) model.compile(loss=tf.keras.losses.Huber(), optimizer=opt, metrics=['mae','mse', tfa.metrics.RSquare(name="R2")]) test_pred = np.squeeze(model.predict(test_images)) test_true = test_images.labels test_residuals = test_true - test_pred train_pred = np.squeeze(model.predict(train_images)) train_true = train_images.labels train_residuals = train_true - train_pred train_score = model.evaluate(train_images) test_score = model.evaluate(test_images) print('test ',test_score) print('train ', train_score) f, axs = plt.subplots(1, 2, figsize=(8,6), gridspec_kw={'width_ratios': [4, 1]}) f.suptitle(f'Residual Plot - {name}', fontsize=13, fontweight='bold', y=0.92) axs[0].scatter(train_pred,train_residuals, label='Train Set', alpha=0.75, color='tab:blue') axs[0].scatter(test_pred,test_residuals, label='Test Set', alpha=0.75, color='tab:orange') axs[0].set_ylabel('Residual (NTU)') axs[0].set_xlabel('Predicted Turbidity (NTU)') axs[0].axhline(0, color='black') axs[0].legend() axs[0].grid() axs[1].hist(train_residuals, bins=50, orientation="horizontal", density=True, alpha=0.9, color='tab:blue') axs[1].hist(test_residuals, bins=50, orientation="horizontal", density=True, alpha=0.75, color='tab:orange') axs[1].axhline(0, color='black') axs[1].set_xlabel('Distribution') axs[1].yaxis.tick_right() axs[1].grid(axis='y') plt.subplots_adjust(wspace=0.05) plt.savefig(f'{save_dir}/residualPlot_{name}.png', dpi=150) plt.show() fig, ax = plt.subplots(1,2,figsize=(13,6)) fig.suptitle(f'Nilai Prediksi vs Observasi - {name}', fontsize=13, fontweight='bold', y=0.96) ax[0].scatter(test_true,test_pred, label=f'$Test\ R^2=${round(test_score[3],3)}',color='tab:orange', alpha=0.75) theta = np.polyfit(test_true, test_pred, 1) y_line = theta[1] + theta[0] * test_true ax[0].plot([test_true.min(), test_true.max()], [y_line.min(), y_line.max()],'k--', lw=2,label='best fit') ax[0].plot([test_true.min(), test_true.max()], [test_true.min(), test_true.max()], 'k--', lw=2, label='identity',color='dimgray') ax[0].set_xlabel('Measured Turbidity (NTU)') ax[0].set_ylabel('Predicted Turbidity (NTU)') ax[0].set_title(f'Test Set', fontsize=10, fontweight='bold') ax[0].set_xlim([0, 130]) ax[0].set_ylim([0, 130]) ax[0].grid() ax[0].legend() ax[1].scatter(train_true,train_pred, label=f'$Train\ R^2=${round(train_score[3],3)}', color='tab:blue', alpha=0.75) theta2 = np.polyfit(train_true, train_pred, 1) y_line2 = theta2[1] + theta2[0] * train_true ax[1].plot([train_true.min(), train_true.max()], [y_line2.min(), y_line2.max()],'k--', lw=2,label='best fit') ax[1].plot([train_true.min(), train_true.max()], [train_true.min(),train_true.max()], 'k--', lw=2, label='identity',color='dimgray') ax[1].set_xlabel('Measured Turbidity (NTU)') ax[1].set_ylabel('Predicted Turbidity (NTU)') ax[1].set_title(f'Train Set', fontsize=10, fontweight='bold') ax[1].set_xlim([0, 130]) ax[1].set_ylim([0, 130]) ax[1].grid() ax[1].legend() plt.savefig(f'{save_dir}/predErrorPlot_{name}.png', dpi=150) plt.show() cv_df = pd.DataFrame.from_dict({'val_loss': VALIDATION_LOSS, 'val_mae': VALIDATION_MAE, 'val_mse': VALIDATION_MSE, 'val_R2': VALIDATION_R2}, orient='index').T cv_csv_file = f'{save_dir}/cross_val.csv' with open(cv_csv_file, mode='w') as f: cv_df.to_csv(f) from google.colab import drive drive.mount('/content/gdrive') save_path = f"/content/gdrive/MyDrive/MODEL BERHASIL/ResNet/{name}" if not os.path.exists(save_path): os.makedirs(save_path) 
oripath = "saved_models/." !cp -a "{oripath}" "{save_path}" # copies files to google drive
_____no_output_____
MIT
ResNet50_CV.ipynb
naufalhisyam/TurbidityPrediction-thesis
Copyright 2019 The TensorFlow Authors.
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
_____no_output_____
CC-BY-4.0
notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb
rses-dl-course/rses-dl-course.github.io
Lab 04a: Dogs vs Cats Image Classification Without Image Augmentation

In this tutorial, we will discuss how to classify images into pictures of cats or pictures of dogs. We'll build an image classifier using a `tf.keras.Sequential` model and load data using `tf.keras.preprocessing.image.ImageDataGenerator`.

Specific concepts that will be covered: in the process, we will build practical experience and develop intuition around the following concepts

* Building _data input pipelines_ using the `tf.keras.preprocessing.image.ImageDataGenerator` class — How can we efficiently work with data on disk to interface with our model?
* _Overfitting_ - what is it, and how do we identify it?

**Before you begin**

Before running the code in this notebook, reset the runtime by going to **Runtime -> Reset all runtimes** in the menu above. If you have been working through several notebooks, this will help you avoid reaching Colab's memory limits.

Importing packages

Let's start by importing the required packages:

* os — to read files and directory structure
* numpy — for some matrix math outside of TensorFlow
* matplotlib.pyplot — to plot the graph and display images in our training and validation data
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator

import os
import matplotlib.pyplot as plt
import numpy as np

import logging
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
_____no_output_____
CC-BY-4.0
notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb
rses-dl-course/rses-dl-course.github.io
Data Loading

To build our image classifier, we begin by downloading the dataset. The dataset we are using is a filtered version of the Dogs vs. Cats dataset from Kaggle (ultimately, this dataset is provided by Microsoft Research).

In previous Colabs, we've used TensorFlow Datasets, which is a very easy and convenient way to use datasets. In this Colab, however, we will make use of the class `tf.keras.preprocessing.image.ImageDataGenerator`, which will read data from disk. We therefore need to directly download *Dogs vs. Cats* from a URL and unzip it to the Colab filesystem.
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip' zip_dir = tf.keras.utils.get_file('cats_and_dogs_filterted.zip', origin=_URL, extract=True)
_____no_output_____
CC-BY-4.0
notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb
rses-dl-course/rses-dl-course.github.io
The dataset we have downloaded has the following directory structure.

cats_and_dogs_filtered
|__ train
    |______ cats: [cat.0.jpg, cat.1.jpg, cat.2.jpg ...]
    |______ dogs: [dog.0.jpg, dog.1.jpg, dog.2.jpg ...]
|__ validation
    |______ cats: [cat.2000.jpg, cat.2001.jpg, cat.2002.jpg ...]
    |______ dogs: [dog.2000.jpg, dog.2001.jpg, dog.2002.jpg ...]

We can list the directories with the following terminal command:
zip_dir_base = os.path.dirname(zip_dir) !find $zip_dir_base -type d -print
_____no_output_____
CC-BY-4.0
notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb
rses-dl-course/rses-dl-course.github.io
We'll now assign variables with the proper file path for the training and validation sets.
base_dir = os.path.join(os.path.dirname(zip_dir), 'cats_and_dogs_filtered') train_dir = os.path.join(base_dir, 'train') validation_dir = os.path.join(base_dir, 'validation') train_cats_dir = os.path.join(train_dir, 'cats') # directory with our training cat pictures train_dogs_dir = os.path.join(train_dir, 'dogs') # directory with our training dog pictures validation_cats_dir = os.path.join(validation_dir, 'cats') # directory with our validation cat pictures validation_dogs_dir = os.path.join(validation_dir, 'dogs') # directory with our validation dog pictures
_____no_output_____
CC-BY-4.0
notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb
rses-dl-course/rses-dl-course.github.io
Understanding our data Let's look at how many cat and dog images we have in our training and validation directories.
num_cats_tr = len(os.listdir(train_cats_dir)) num_dogs_tr = len(os.listdir(train_dogs_dir)) num_cats_val = len(os.listdir(validation_cats_dir)) num_dogs_val = len(os.listdir(validation_dogs_dir)) total_train = num_cats_tr + num_dogs_tr total_val = num_cats_val + num_dogs_val print('total training cat images:', num_cats_tr) print('total training dog images:', num_dogs_tr) print('total validation cat images:', num_cats_val) print('total validation dog images:', num_dogs_val) print("--") print("Total training images:", total_train) print("Total validation images:", total_val)
_____no_output_____
CC-BY-4.0
notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb
rses-dl-course/rses-dl-course.github.io
Setting Model Parameters For convenience, we'll set up variables that will be used later while pre-processing our dataset and training our network.
BATCH_SIZE = 100 # Number of training examples to process before updating our models variables IMG_SHAPE = 150 # Our training data consists of images with width of 150 pixels and height of 150 pixels
_____no_output_____
CC-BY-4.0
notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb
rses-dl-course/rses-dl-course.github.io
Data Preparation Images must be formatted into appropriately pre-processed floating point tensors before being fed into the network. The steps involved in preparing these images are:
1. Read images from the disk
2. Decode the contents of these images and convert them into the proper grid format as per their RGB content
3. Convert them into floating point tensors
4. Rescale the tensors from values between 0 and 255 to values between 0 and 1
Fortunately, all these tasks can be done using the class **tf.keras.preprocessing.image.ImageDataGenerator**. We can set this up in a couple of lines of code.
train_image_generator = ImageDataGenerator(rescale=1./255) # Generator for our training data validation_image_generator = ImageDataGenerator(rescale=1./255) # Generator for our validation data
_____no_output_____
CC-BY-4.0
notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb
rses-dl-course/rses-dl-course.github.io
After defining our generators for training and validation images, the **flow_from_directory** method will load images from the disk, apply rescaling, and resize them using a single line of code.
train_data_gen = train_image_generator.flow_from_directory(batch_size=BATCH_SIZE, directory=train_dir, shuffle=True, target_size=(IMG_SHAPE,IMG_SHAPE), #(150,150) class_mode='binary') val_data_gen = validation_image_generator.flow_from_directory(batch_size=BATCH_SIZE, directory=validation_dir, shuffle=False, target_size=(IMG_SHAPE,IMG_SHAPE), #(150,150) class_mode='binary')
_____no_output_____
CC-BY-4.0
notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb
rses-dl-course/rses-dl-course.github.io
Visualizing Training images We can visualize our training images by getting a batch of images from the training generator, and then plotting a few of them using `matplotlib`.
sample_training_images, _ = next(train_data_gen)
_____no_output_____
CC-BY-4.0
notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb
rses-dl-course/rses-dl-course.github.io
The `next` function returns a batch from the dataset. One batch is a tuple of (*many images*, *many labels*). For right now, we're discarding the labels because we just want to look at the images.
# This function will plot images in the form of a grid with 1 row and 5 columns where images are placed in each column. def plotImages(images_arr): fig, axes = plt.subplots(1, 5, figsize=(20,20)) axes = axes.flatten() for img, ax in zip(images_arr, axes): ax.imshow(img) plt.tight_layout() plt.show() plotImages(sample_training_images[:5]) # Plot images 0-4
_____no_output_____
CC-BY-4.0
notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb
rses-dl-course/rses-dl-course.github.io
Model Creation Exercise 4.1 Define the model The model consists of four convolution blocks with a max pool layer in each of them. Then we have a fully connected layer with 512 units, with a `relu` activation function. The model will output class probabilities for two classes — dogs and cats — using `softmax`. The list of model layers:
* 2D Convolution - 32 filters, 3x3 kernel, ReLU activation
* 2D Max pooling - 2x2 kernel
* 2D Convolution - 64 filters, 3x3 kernel, ReLU activation
* 2D Max pooling - 2x2 kernel
* 2D Convolution - 128 filters, 3x3 kernel, ReLU activation
* 2D Max pooling - 2x2 kernel
* 2D Convolution - 128 filters, 3x3 kernel, ReLU activation
* 2D Max pooling - 2x2 kernel
* Flatten
* Dense - 512 nodes
* Dense - 2 nodes
Check the documentation for how to specify the layers: [https://www.tensorflow.org/api_docs/python/tf/keras/layers](https://www.tensorflow.org/api_docs/python/tf/keras/layers)
model = tf.keras.models.Sequential([ # TODO - Create the CNN model as specified above ])
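For reference, here is one possible way the layer list above might be written out (a sketch only, not necessarily identical to the linked solution; it assumes the `IMG_SHAPE` value defined earlier for the input size):

model_sketch = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(IMG_SHAPE, IMG_SHAPE, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(2, activation='softmax')  # class probabilities for dogs and cats
])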
_____no_output_____
CC-BY-4.0
notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb
rses-dl-course/rses-dl-course.github.io
Exercise 4.1 Solution The solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E4.1.ipynb) Exercise 4.2 Compile the model As usual, we will use the `adam` optimizer. Since we output a softmax categorization, we'll use `sparse_categorical_crossentropy` as the loss function. We would also like to look at training and validation accuracy on each epoch as we train our network, so we are passing in the metrics argument.
# TODO - Compile the model
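A plausible compile call consistent with the description above (a sketch; it assumes your model is stored in a variable named `model`):

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])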
_____no_output_____
CC-BY-4.0
notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb
rses-dl-course/rses-dl-course.github.io
Exercise 4.2 Solution The solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E4.2.ipynb) Model Summary Let's look at all the layers of our network using the **summary** method.
model.summary()
_____no_output_____
CC-BY-4.0
notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb
rses-dl-course/rses-dl-course.github.io
Exercise 4.3 Train the model It's time we train our network.
* Since we have a validation dataset, we can use this to evaluate our model as it trains by adding the `validation_data` parameter.
* `validation_steps` can also be added if you'd like to use less than the full validation set.
# TODO - Fit the model
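One possible training call, sketched under the assumption that an `EPOCHS` constant is chosen here (the plotting cell further down expects variables named `history` and `EPOCHS`):

EPOCHS = 100  # assumption: pick however many epochs you want to train for
history = model.fit(
    train_data_gen,
    steps_per_epoch=int(np.ceil(total_train / float(BATCH_SIZE))),
    epochs=EPOCHS,
    validation_data=val_data_gen,
    validation_steps=int(np.ceil(total_val / float(BATCH_SIZE)))
)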
_____no_output_____
CC-BY-4.0
notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb
rses-dl-course/rses-dl-course.github.io
Exercise 4.3 Solution The solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E4.3.ipynb) Visualizing results of the training We'll now visualize the results we get after training our network.
acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs_range = range(EPOCHS) plt.figure(figsize=(20, 8)) plt.subplot(1, 2, 1) plt.plot(epochs_range, acc, label='Training Accuracy') plt.plot(epochs_range, val_acc, label='Validation Accuracy') plt.legend(loc='lower right') plt.title('Training and Validation Accuracy') plt.subplot(1, 2, 2) plt.plot(epochs_range, loss, label='Training Loss') plt.plot(epochs_range, val_loss, label='Validation Loss') plt.legend(loc='upper right') plt.title('Training and Validation Loss') plt.savefig('./foo.png') plt.show()
_____no_output_____
CC-BY-4.0
notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb
rses-dl-course/rses-dl-course.github.io
CHANDAN KUMAR (BATCH 3) - GOOGLE COLAB / logistic regression & Ridge & Lasso Regression (Rahul Agnihotri (T.L.)) DATASET [HEART](https://drive.google.com/file/d/10dopwCjH4VE557tSynCcY3fV9OBowq9h/view?usp=sharing) Packages to load
import numpy as np import pandas as pd from sklearn.linear_model import Ridge from sklearn.linear_model import Lasso import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import GridSearchCV # for hiding warning import warnings warnings.filterwarnings('ignore')
_____no_output_____
Apache-2.0
GAN Model/Logistic_regression_Chandan_kumar.ipynb
MrTONYCHAN/xyz
Input directory
heart_df = pd.read_csv(r'/content/heart.csv') heart_df
_____no_output_____
Apache-2.0
GAN Model/Logistic_regression_Chandan_kumar.ipynb
MrTONYCHAN/xyz
About the data set The "target" field refers to the presence of heart disease in the patient. It is integer valued: 0 = no/less chance of heart attack and 1 = more chance of heart attack. Attribute Information
- 1) age
- 2) sex
- 3) chest pain type (4 values)
- 4) resting blood pressure
- 5) serum cholesterol in mg/dl
- 6) fasting blood sugar > 120 mg/dl
- 7) resting electrocardiographic results (values 0, 1, 2)
- 8) maximum heart rate achieved
- 9) exercise induced angina
- 10) oldpeak = ST depression induced by exercise relative to rest
- 11) the slope of the peak exercise ST segment
- 12) number of major vessels (0-3) colored by fluoroscopy
- 13) thal: 0 = normal; 1 = fixed defect; 2 = reversible defect
- 14) target: 0 = less chance of heart attack, 1 = more chance of heart attack
Get to know the data
heart_df.head() heart_df.dtypes heart_df.isnull().sum() print('Shape : ',heart_df.shape) print('Describe : ',heart_df.describe())
_____no_output_____
Apache-2.0
GAN Model/Logistic_regression_Chandan_kumar.ipynb
MrTONYCHAN/xyz
EDA (Exploratory Data Analysis)
#import pandas_profiling as pp #pp.ProfileReport(heart_df) %matplotlib inline from matplotlib import pyplot as plt fig,axes=plt.subplots(nrows=1,ncols=1,figsize=(10,5)) sns.countplot(heart_df.target) fig,axes=plt.subplots(nrows=1,ncols=1,figsize=(15,10)) sns.distplot(heart_df['age'],hist=True,kde=True,rug=False,label='age',norm_hist=True) heart_df.columns corr = heart_df.corr(method = 'pearson') corr colormap = plt.cm.OrRd plt.figure(figsize=(15, 10)) plt.title("Person Correlation of Features", y = 1.05, size = 15) sns.heatmap(corr.astype(float).corr(), linecolor = "white", cmap = colormap, annot = True) import plotly.express as px px.bar(heart_df, x= 'age' , y='target', color='sex' , title= 'heart attack patoents age range and sex', labels = { 'output': 'Number of patients', 'Age': 'Age od patient'})
_____no_output_____
Apache-2.0
GAN Model/Logistic_regression_Chandan_kumar.ipynb
MrTONYCHAN/xyz
Creating and Predicting Learning Models
X= heart_df.drop(columns= ['target']) y= heart_df['target']
_____no_output_____
Apache-2.0
GAN Model/Logistic_regression_Chandan_kumar.ipynb
MrTONYCHAN/xyz
Data normalization
from sklearn.preprocessing import MinMaxScaler
# Data normalization [0, 1]
transformer = MinMaxScaler()
transformer.fit(X)
X = transformer.transform(X)
X
from sklearn.model_selection import train_test_split
# Note: train_test_split returns the training split first, so unpack as (train, test)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123)
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(x_train, y_train)
y_pred = lr.predict(x_test)
y_pred_proba = lr.predict_proba(x_test)[:, 1]
_____no_output_____
Apache-2.0
GAN Model/Logistic_regression_Chandan_kumar.ipynb
MrTONYCHAN/xyz
Confusion matrix
- conf_mat=multiclass,
- colorbar=True,
- show_absolute=False,
- show_normed=True,
- class_names=class_names
from sklearn.metrics import confusion_matrix, classification_report from mlxtend.plotting import plot_confusion_matrix cm=confusion_matrix(y_test, y_pred) fig, ax = plot_confusion_matrix(conf_mat=cm) plt.rcParams['font.size'] = 40 #(conf_mat=multiclass,colorbar=True, show_absolute=False, show_normed=True, class_names=class_names) plt.show() # 0,0 # 0,1 # 1,0 # 1,1 print(classification_report(y_test, y_pred)) from sklearn.model_selection import train_test_split, cross_val_score from sklearn.metrics import accuracy_score, classification_report, precision_score, recall_score from sklearn.metrics import confusion_matrix, precision_recall_curve, roc_curve, auc, log_loss [fpr, tpr, thr] = roc_curve(y_test, y_pred_proba) print('Train/Test split results:') print(lr.__class__.__name__+" accuracy is %2.3f" % accuracy_score(y_test, y_pred)) print(lr.__class__.__name__+" log_loss is %2.3f" % log_loss(y_test, y_pred_proba)) print(lr.__class__.__name__+" auc is %2.3f" % auc(fpr, tpr)) idx = np.min(np.where(tpr > 0.95)) # index of the first threshold for which the sensibility > 0.95 plt.figure(figsize=(10,10)) plt.plot(fpr, tpr, color='coral', label='ROC curve (area = %0.3f)' % auc(fpr, tpr)) plt.plot([0, 1], [0, 1], 'k--') plt.plot([0,fpr[idx]], [tpr[idx],tpr[idx]], 'k--', color='blue') plt.plot([fpr[idx],fpr[idx]], [0,tpr[idx]], 'k--', color='blue') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate (1 - specificity)', fontsize=5) plt.ylabel('True Positive Rate (recall)', fontsize=5) plt.title('Receiver operating characteristic (ROC) curve') plt.legend(loc="lower right") plt.show() heart_df.corr() from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score from sklearn import metrics LR_model= LogisticRegression() tuned_parameters = {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000] , 'penalty':['l1','l2'] }
_____no_output_____
Apache-2.0
GAN Model/Logistic_regression_Chandan_kumar.ipynb
MrTONYCHAN/xyz
L1 and L2 are regularization parameters. They're used to avoid overfitting. Both L1 and L2 regularization prevent overfitting by shrinking (imposing a penalty on) the coefficients. L1 is the first-moment norm |x1-x2| (|w| in the regularization case), which is simply the absolute distance between two points, whereas L2 is the second-moment norm corresponding to the Euclidean distance, that is |x1-x2|^2 (|w|^2 in the regularization case). In simple words, L2 (Ridge) shrinks all the coefficients by the same proportions but eliminates none, while L1 (Lasso) can shrink some coefficients to zero, performing variable selection. If all the features are correlated with the label, ridge outperforms lasso, as the coefficients are never zero in ridge. If only a subset of features are correlated with the label, lasso outperforms ridge, as in the lasso model some coefficients can be shrunk to zero.
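A minimal sketch of that difference on the scaled heart data already built above (`X`, `y` as defined earlier; the shared `alpha=0.05` is an illustrative assumption, not a tuned value):

from sklearn.linear_model import Lasso, Ridge
import numpy as np

lasso_demo = Lasso(alpha=0.05).fit(X, y)   # L1 penalty: can zero out coefficients
ridge_demo = Ridge(alpha=0.05).fit(X, y)   # L2 penalty: shrinks but keeps all coefficients
print("Lasso coefficients set exactly to zero:", np.sum(lasso_demo.coef_ == 0))
print("Ridge coefficients set exactly to zero:", np.sum(ridge_demo.coef_ == 0))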
heart_df.corr() from sklearn.model_selection import GridSearchCV LR= GridSearchCV(LR_model, tuned_parameters,cv=10) LR.fit(x_train,y_train) print(LR.best_params_) y_prob = LR.predict_proba(x_test)[:,1] # This will give positive class prediction probabilities y_pred = np.where(y_prob > 0.5, 1, 0) # This will threshold the probabilities to give class predictions. LR.score(x_test, y_pred) confusion_matrix=metrics.confusion_matrix(y_test,y_pred) confusion_matrix from sklearn.metrics import confusion_matrix, classification_report from mlxtend.plotting import plot_confusion_matrix cm=confusion_matrix(y_test, y_pred) fig, ax = plot_confusion_matrix(conf_mat=cm) plt.rcParams['font.size'] = 40 #(conf_mat=multiclass,colorbar=True, show_absolute=False, show_normed=True, class_names=class_names) plt.show() auc_roc=metrics.classification_report(y_test,y_pred) auc_roc auc_roc=metrics.roc_auc_score(y_test,y_pred) auc_roc from sklearn.metrics import roc_curve, auc false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_prob) roc_auc = auc(false_positive_rate, true_positive_rate) roc_auc LR_ridge= LogisticRegression(penalty='l2') LR_ridge.fit(x_train,y_train) y_prob = LR_ridge.predict_proba(x_test)[:,1] # This will give positive class prediction probabilities y_pred = np.where(y_prob > 0.5, 1, 0) # This will threshold the probabilities to give class predictions. LR_ridge.score(x_test, y_pred) confusion_matrix=metrics.confusion_matrix(y_test,y_pred) confusion_matrix from sklearn.metrics import confusion_matrix, classification_report from mlxtend.plotting import plot_confusion_matrix cm=confusion_matrix(y_test, y_pred) fig, ax = plot_confusion_matrix(conf_mat=cm) plt.rcParams['font.size'] = 40 #(conf_mat=multiclass,colorbar=True, show_absolute=False, show_normed=True, class_names=class_names) plt.show() auc_roc=metrics.classification_report(y_test,y_pred) auc_roc auc_roc=metrics.roc_auc_score(y_test,y_pred) auc_roc from sklearn.metrics import roc_curve, auc false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_prob) roc_auc = auc(false_positive_rate, true_positive_rate) roc_auc
_____no_output_____
Apache-2.0
GAN Model/Logistic_regression_Chandan_kumar.ipynb
MrTONYCHAN/xyz
**EXPERIMENTAL ZONE** LASSO AND RIDGE
Training_Accuracy_Before = [] Testing_Accuracy_Before = [] Training_Accuracy_After = [] Testing_Accuracy_After = [] Models = ['Linear Regression', 'Lasso Regression', 'Ridge Regression'] alpha_space = np.logspace(-4, 0, 30) # Checking for alpha from .0001 to 1 and finding the best value for alpha alpha_space ridge_scores = [] ridge = Ridge(normalize = True) for alpha in alpha_space: ridge.alpha = alpha val = np.mean(cross_val_score(ridge,x_train,y_train, cv = 10)) ridge_scores.append(val) lasso_scores = [] lasso = Lasso(normalize = True) for alpha in alpha_space: lasso.alpha = alpha val = np.mean(cross_val_score(lasso, x_train,y_train, cv = 10)) lasso_scores.append(val) plt.figure(figsize=(8, 8)) plt.plot(alpha_space, ridge_scores, marker = 'D', label = "Ridge") plt.plot(alpha_space, lasso_scores, marker = 'D', label = "Lasso") plt.legend() plt.show() # Performing GridSearchCV with Cross Validation technique on Lasso Regression and finding the optimum value of alpha params = {'alpha': (np.logspace(-8, 8, 100))} # It will check from 1e-08 to 1e+08 lasso = Lasso(normalize=True) lasso_model = GridSearchCV(lasso, params, cv = 10) lasso_model.fit(x_train, y_train) print(lasso_model.best_params_) print(lasso_model.best_score_) # Using value of alpha as 0.009545 to get best accuracy for Lasso Regression lasso = Lasso(alpha = 0.009545, normalize = True) lasso.fit(x_train, y_train) train_score = lasso.score(x_train, y_train) print(train_score) test_score = lasso.score(x_test, y_test) print(test_score) Training_Accuracy_Before.append(train_score) Testing_Accuracy_Before.append(test_score) # Performing GridSearchCV with Cross Validation technique on Ridge Regression and finding the optimum value of alpha params = {'alpha': (np.logspace(-8, 8, 100))} # It will check from 1e-08 to 1e+08 ridge = Ridge(normalize=True) ridge_model = GridSearchCV(ridge, params, cv = 10) ridge_model.fit(x_train, y_train) print(ridge_model.best_params_) print(ridge_model.best_score_) # Using value of alpha as 1.2045035 to get best accuracy for Ridge Regression ridge = Ridge(alpha = 1.2045035, normalize = True) ridge.fit(x_train, y_train) train_score = ridge.score(x_train, y_train) print(train_score) test_score = ridge.score(x_test, y_test) print(test_score) Training_Accuracy_Before.append(train_score) Testing_Accuracy_Before.append(test_score) coefficients = lasso.coef_ coefficients from sklearn.linear_model import LinearRegression logreg = LinearRegression() logreg.fit(x_train, y_train) train_score = logreg.score(x_train, y_train) print(train_score) test_score = logreg.score(x_test, y_test) print(test_score) Training_Accuracy_After.append(train_score) Testing_Accuracy_After.append(test_score) # Performing GridSearchCV with Cross Validation technique on Lasso Regression and finding the optimum value of alpha params = {'alpha': (np.logspace(-8, 8, 100))} # It will check from 1e-08 to 1e+08 lasso = Lasso(normalize=True) lasso_model = GridSearchCV(lasso, params, cv = 10) lasso_model.fit(x_train, y_train) print(lasso_model.best_params_) print(lasso_model.best_score_) # Using value of alpha as 0.009545 to get best accuracy for Lasso Regression lasso = Lasso(alpha = 0.009545, normalize = True) lasso.fit(x_train, y_train) train_score = lasso.score(x_train, y_train) print(train_score) test_score = lasso.score(x_test, y_test) print(test_score) Training_Accuracy_After.append(train_score) Testing_Accuracy_After.append(test_score) # Performing GridSearchCV with Cross Validation technique on Ridge Regression and finding the optimum value 
# of alpha
params = {'alpha': (np.logspace(-8, 8, 100))} # It will check from 1e-08 to 1e+08
ridge = Ridge(normalize=True)
ridge_model = GridSearchCV(ridge, params, cv = 10)
ridge_model.fit(x_train, y_train)
print(ridge_model.best_params_)
print(ridge_model.best_score_)
# Using value of alpha as 1.204503 to get best accuracy for Ridge Regression
ridge = Ridge(alpha = 1.204503, normalize = True)
ridge.fit(x_train, y_train)
train_score = ridge.score(x_train, y_train)
print(train_score)
test_score = ridge.score(x_test, y_test)
print(test_score)
Training_Accuracy_After.append(train_score)
Testing_Accuracy_After.append(test_score)
plt.figure(figsize=(50,10))
plt.plot(Training_Accuracy_Before, label = 'Training_Accuracy_Before')
plt.plot(Training_Accuracy_After, label = 'Training_Accuracy_After')
plt.xticks(range(len(Models)), Models, rotation = 45)
plt.title('Training Accuracy Behaviour')
plt.legend()
plt.show()
plt.figure(figsize=(50,10))
plt.plot(Testing_Accuracy_Before, label = 'Testing_Accuracy_Before')
plt.plot(Testing_Accuracy_After, label = 'Testing_Accuracy_After')
plt.xticks(range(len(Models)), Models, rotation = 45)
plt.title('Testing Accuracy Behaviour')
plt.legend()
plt.show()
_____no_output_____
Apache-2.0
GAN Model/Logistic_regression_Chandan_kumar.ipynb
MrTONYCHAN/xyz
**DANGER** **ZONE**
#list of alpha for tuning params = {'alpha' : [0.001 , 0.001,0.01,0.05, 0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,.9, 1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0, 10.0,20,30,40,50,100,500,1000]} ridge = Ridge() # cross validation folds = 5 model_cv = GridSearchCV(estimator = ridge, param_grid = params, scoring = 'neg_mean_absolute_error', cv = folds, return_train_score = True, verbose = 1) model_cv.fit(x_train,y_train) #Checking the value of optimum number of parameters print(model_cv.best_params_) print(model_cv.best_score_) cv_results = pd.DataFrame(model_cv.cv_results_) cv_results = cv_results[cv_results['param_alpha']<=1000] cv_results # plotting mean test and train scoes with alpha cv_results['param_alpha'] = cv_results['param_alpha'].astype('int32') plt.figure(figsize=(16,5)) # plotting plt.plot(cv_results['param_alpha'], cv_results['mean_train_score']) plt.plot(cv_results['param_alpha'], cv_results['mean_test_score']) plt.xlabel('alpha') plt.ylabel('Negative Mean Absolute Error') plt.title("Negative Mean Absolute Error and alpha") plt.legend(['train score', 'test score'], loc='upper right') plt.show()
_____no_output_____
Apache-2.0
GAN Model/Logistic_regression_Chandan_kumar.ipynb
MrTONYCHAN/xyz
Insights:
alpha = 4 ridge = Ridge(alpha=alpha) ridge.fit(x_train,y_train) ridge.coef_
_____no_output_____
Apache-2.0
GAN Model/Logistic_regression_Chandan_kumar.ipynb
MrTONYCHAN/xyz
Selected Economic Characteristics: Employment Status from the American Community Survey **[Work in progress]** This notebook downloads [selected economic characteristics (DP03)](https://data.census.gov/cedsci/table?tid=ACSDP5Y2018.DP03) from the American Community Survey 2018 5-Year Data. Data source: [American Community Survey 5-Year Data 2018](https://www.census.gov/data/developers/data-sets/acs-5year.html) Authors: Peter Rose (pwrose@ucsd.edu), Ilya Zaslavsky (zaslavsk@sdsc.edu)
import os import pandas as pd from pathlib import Path import time pd.options.display.max_rows = None # display all rows pd.options.display.max_columns = None # display all columsns NEO4J_IMPORT = Path(os.getenv('NEO4J_IMPORT')) print(NEO4J_IMPORT)
/Users/peter/Library/Application Support/com.Neo4j.Relate/data/dbmss/dbms-8bf637fc-0d20-4d9f-9c6f-f7e72e92a4da/import
MIT
notebooks/dataprep/03a-USCensusDP03Employment.ipynb
yogeshchaudhari/covid-19-community
Download selected variables
* [Selected economic characteristics for US](https://data.census.gov/cedsci/table?tid=ACSDP5Y2018.DP03)
* [List of variables as HTML](https://api.census.gov/data/2018/acs/acs5/profile/groups/DP03.html) or [JSON](https://api.census.gov/data/2018/acs/acs5/profile/groups/DP03/)
* [Description of variables](https://www2.census.gov/programs-surveys/acs/tech_docs/subject_definitions/2018_ACSSubjectDefinitions.pdf)
* [Example URLs for API](https://api.census.gov/data/2018/acs/acs5/profile/examples.html)
Specify variables from DP03 group and assign property names. Names must follow the [Neo4j property naming conventions](https://neo4j.com/docs/getting-started/current/graphdb-concepts/graphdb-naming-rules-and-recommendations).
variables = {# EMPLOYMENT STATUS 'DP03_0001E': 'population16YearsAndOver', 'DP03_0002E': 'population16YearsAndOverInLaborForce', 'DP03_0002PE': 'population16YearsAndOverInLaborForcePct', 'DP03_0003E': 'population16YearsAndOverInCivilianLaborForce', 'DP03_0003PE': 'population16YearsAndOverInCivilianLaborForcePct', 'DP03_0006E': 'population16YearsAndOverInArmedForces', 'DP03_0006PE': 'population16YearsAndOverInArmedForcesPct', 'DP03_0007E': 'population16YearsAndOverNotInLaborForce', 'DP03_0007PE': 'population16YearsAndOverNotInLaborForcePct' #'DP03_0014E': 'ownChildrenOfTheHouseholderUnder6Years', #'DP03_0015E': 'ownChildrenOfTheHouseholderUnder6YearsAllParentsInLaborForce', #'DP03_0016E': 'ownChildrenOfTheHouseholder6To17Years', #'DP03_0017E': 'ownChildrenOfTheHouseholder6To17YearsAllParentsInLaborForce', } fields = ",".join(variables.keys()) for v in variables.values(): print('e.' + v + ' = toInteger(row.' + v + '),') print(len(variables.keys()))
9
MIT
notebooks/dataprep/03a-USCensusDP03Employment.ipynb
yogeshchaudhari/covid-19-community
Download county-level data using US Census API
url_county = f'https://api.census.gov/data/2018/acs/acs5/profile?get={fields}&for=county:*' df = pd.read_json(url_county, dtype='str') df.fillna('', inplace=True) df.head()
_____no_output_____
MIT
notebooks/dataprep/03a-USCensusDP03Employment.ipynb
yogeshchaudhari/covid-19-community
Add column names
df = df[1:].copy() # skip first row of labels columns = list(variables.values()) columns.append('stateFips') columns.append('countyFips') df.columns = columns
_____no_output_____
MIT
notebooks/dataprep/03a-USCensusDP03Employment.ipynb
yogeshchaudhari/covid-19-community
Remove Puerto Rico (stateFips = 72) to limit data to US StatesTODO handle data for Puerto Rico (GeoNames represents Puerto Rico as a country)
df.query("stateFips != '72'", inplace=True)
_____no_output_____
MIT
notebooks/dataprep/03a-USCensusDP03Employment.ipynb
yogeshchaudhari/covid-19-community
Save list of state fips (required later to get tract data by state)
stateFips = list(df['stateFips'].unique()) stateFips.sort() print(stateFips) df.head() # Example data df[(df['stateFips'] == '06') & (df['countyFips'] == '073')] df['source'] = 'American Community Survey 5 year' df['aggregationLevel'] = 'Admin2'
_____no_output_____
MIT
notebooks/dataprep/03a-USCensusDP03Employment.ipynb
yogeshchaudhari/covid-19-community
Save data
df.to_csv(NEO4J_IMPORT / "03a-USCensusDP03EmploymentAdmin2.csv", index=False)
_____no_output_____
MIT
notebooks/dataprep/03a-USCensusDP03Employment.ipynb
yogeshchaudhari/covid-19-community
Download zip-level data using US Census API
url_zip = f'https://api.census.gov/data/2018/acs/acs5/profile?get={fields}&for=zip%20code%20tabulation%20area:*' df = pd.read_json(url_zip, dtype='str') df.fillna('', inplace=True) df.head()
_____no_output_____
MIT
notebooks/dataprep/03a-USCensusDP03Employment.ipynb
yogeshchaudhari/covid-19-community
Add column names
df = df[1:].copy() # skip first row columns = list(variables.values()) columns.append('stateFips') columns.append('postalCode') df.columns = columns df.head() # Example data df.query("postalCode == '90210'") df['source'] = 'American Community Survey 5 year' df['aggregationLevel'] = 'PostalCode'
_____no_output_____
MIT
notebooks/dataprep/03a-USCensusDP03Employment.ipynb
yogeshchaudhari/covid-19-community
Save data
df.to_csv(NEO4J_IMPORT / "03a-USCensusDP03EmploymentZip.csv", index=False)
_____no_output_____
MIT
notebooks/dataprep/03a-USCensusDP03Employment.ipynb
yogeshchaudhari/covid-19-community
Download tract-level data using US Census APITract-level data are only available by state, so we need to loop over all states.
def get_tract_data(state): url_tract = f'https://api.census.gov/data/2018/acs/acs5/profile?get={fields}&for=tract:*&in=state:{state}' df = pd.read_json(url_tract, dtype='str') time.sleep(1) # skip first row of labels df = df[1:].copy() # Add column names columns = list(variables.values()) columns.append('stateFips') columns.append('countyFips') columns.append('tract') df.columns = columns return df df = pd.concat((get_tract_data(state) for state in stateFips)) df.fillna('', inplace=True) df['tract'] = df['stateFips'] + df['countyFips'] + df['tract'] df['source'] = 'American Community Survey 5 year' df['aggregationLevel'] = 'Tract' # Example data for San Diego County df[(df['stateFips'] == '06') & (df['countyFips'] == '073')].head()
_____no_output_____
MIT
notebooks/dataprep/03a-USCensusDP03Employment.ipynb
yogeshchaudhari/covid-19-community
Save data
df.to_csv(NEO4J_IMPORT / "03a-USCensusDP03EmploymentTract.csv", index=False) df.shape
_____no_output_____
MIT
notebooks/dataprep/03a-USCensusDP03Employment.ipynb
yogeshchaudhari/covid-19-community
Settings
%env TF_KERAS = 1 import os sep_local = os.path.sep import sys # sys.path.append('..' + sep_local + '..' + sep_local +'..' + sep_local + '..' + sep_local + '..'+ sep_local + '..') # For Windows import # os.chdir('..' + sep_local + '..' + sep_local +'..' + sep_local + '..' + sep_local + '..'+ sep_local + '..') # For Linux import os.chdir('/content/Generative_Models/') print(sep_local) print(os.getcwd()) import tensorflow as tf print(tf.__version__)
_____no_output_____
MIT
notebooks/Atari/Pacman_Colab/Transformative/Dense/AE/pacman_AE_Dense_reconst_ellwlb_episode_flat_working.ipynb
azeghost/Generative_Models
Dataset loading
dataset_name='atari_pacman' images_dir = IMG_DIR # images_dir = '/home/azeghost/datasets/.mspacman/atari_v1/screens/mspacman' #Linux #images_dir = 'C:\\projects\\pokemon\DS06\\' validation_percentage = 25 valid_format = 'png' from training.generators.file_image_generator import create_image_lists, get_generators imgs_list = create_image_lists( image_dir=images_dir, validation_pct=validation_percentage, valid_imgae_formats=valid_format, verbose=0 ) scale=1 image_size=(160//scale, 210//scale, 3) batch_size = 10 EPIS_LEN = 10 EPIS_SHIFT = 5 inputs_shape = image_size latents_dim = 30 intermediate_dim = 30 training_generator, testing_generator = get_generators( images_list=imgs_list, image_dir=images_dir, image_size=image_size, batch_size=batch_size, class_mode='episode_flat', episode_len=EPIS_LEN, episode_shift=EPIS_SHIFT ) import tensorflow as tf train_ds = tf.data.Dataset.from_generator( lambda: training_generator, output_types=(tf.float32, tf.float32) , output_shapes=(tf.TensorShape((batch_size* EPIS_LEN, ) + image_size), tf.TensorShape((batch_size* EPIS_LEN, ) + image_size) ) ) test_ds = tf.data.Dataset.from_generator( lambda: testing_generator, output_types=(tf.float32, tf.float32) , output_shapes=(tf.TensorShape((batch_size* EPIS_LEN, ) + image_size), tf.TensorShape((batch_size* EPIS_LEN, ) + image_size) ) ) _instance_scale=1.0 for data in train_ds: _instance_scale = float(data[0].numpy().max()) break _instance_scale = 1.0 import numpy as np from collections.abc import Iterable if isinstance(inputs_shape, Iterable): _outputs_shape = np.prod(inputs_shape) inputs_shape
_____no_output_____
MIT
notebooks/Atari/Pacman_Colab/Transformative/Dense/AE/pacman_AE_Dense_reconst_ellwlb_episode_flat_working.ipynb
azeghost/Generative_Models
Model's Layers definition
# tdDense = lambda **kwds: tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(**kwds)) # enc_lays = [tdDense(units=intermediate_dim//2, activation='relu'), # tdDense(units=intermediate_dim//2, activation='relu'), # tf.keras.layers.Flatten(), # tf.keras.layers.Dense(units=latents_dim)] # dec_lays = [tf.keras.layers.Dense(units=latents_dim, activation='relu'), # tf.keras.layers.Reshape(inputs_shape), # tdDense(units=intermediate_dim, activation='relu'), # tdDense(units=_outputs_shape), # tf.keras.layers.Reshape(inputs_shape) # ] enc_lays = [tf.keras.layers.Dense(units=intermediate_dim//2, activation='relu'), tf.keras.layers.Dense(units=intermediate_dim//2, activation='relu'), tf.keras.layers.Flatten(), tf.keras.layers.Dense(units=latents_dim)] dec_lays = [tf.keras.layers.Dense(units=latents_dim, activation='relu'), tf.keras.layers.Dense(units=3*intermediate_dim//2, activation='relu'), tf.keras.layers.Dense(units=_outputs_shape), tf.keras.layers.Reshape(inputs_shape)]
_____no_output_____
MIT
notebooks/Atari/Pacman_Colab/Transformative/Dense/AE/pacman_AE_Dense_reconst_ellwlb_episode_flat_working.ipynb
azeghost/Generative_Models
Model definition
model_name = dataset_name+'AE_Dense_reconst_ell' #windows #experiments_dir='..' + sep_local + '..' + sep_local +'..' + sep_local + '..' + sep_local + '..'+sep_local+'experiments'+sep_local + model_name #linux experiments_dir=os.getcwd()+ sep_local +'experiments'+sep_local + model_name from training.autoencoding_basic.transformative.AE import autoencoder as AE # inputs_shape=image_size variables_params = \ [ { 'name': 'inference', 'inputs_shape':inputs_shape, 'outputs_shape':latents_dim, 'layers': enc_lays } , { 'name': 'generative', 'inputs_shape':latents_dim, 'outputs_shape':inputs_shape, 'layers':dec_lays } ] from os.path import abspath from utils.data_and_files.file_utils import create_if_not_exist _restore = os.path.join(experiments_dir, 'var_save_dir') create_if_not_exist(_restore) absolute = abspath(_restore) print("Restore_dir",absolute) absolute = abspath(experiments_dir) print("Recording_dir",absolute) print("Current working dir",os.getcwd()) #to restore trained model, set filepath=_restore ae = AE( name=model_name, latents_dim=latents_dim, batch_size=batch_size * EPIS_LEN, episode_len= 1, variables_params=variables_params, filepath=_restore ) #ae.compile(metrics=None) ae.compile()
_____no_output_____
MIT
notebooks/Atari/Pacman_Colab/Transformative/Dense/AE/pacman_AE_Dense_reconst_ellwlb_episode_flat_working.ipynb
azeghost/Generative_Models
Callbacks
from training.callbacks.sample_generation import SampleGeneration from training.callbacks.save_model import ModelSaver es = tf.keras.callbacks.EarlyStopping( monitor='loss', min_delta=1e-12, patience=12, verbose=1, restore_best_weights=False ) ms = ModelSaver(filepath=_restore) csv_dir = os.path.join(experiments_dir, 'csv_dir') create_if_not_exist(csv_dir) csv_dir = os.path.join(csv_dir, model_name+'.csv') csv_log = tf.keras.callbacks.CSVLogger(csv_dir, append=True) absolute = abspath(csv_dir) print("Csv_dir",absolute) image_gen_dir = os.path.join(experiments_dir, 'image_gen_dir') create_if_not_exist(image_gen_dir) absolute = abspath(image_gen_dir) print("Image_gen_dir",absolute) sg = SampleGeneration(latents_shape=latents_dim, filepath=image_gen_dir, gen_freq=5, save_img=True, gray_plot=False)
_____no_output_____
MIT
notebooks/Atari/Pacman_Colab/Transformative/Dense/AE/pacman_AE_Dense_reconst_ellwlb_episode_flat_working.ipynb
azeghost/Generative_Models
Model Training
ae.fit( x=train_ds, input_kw=None, steps_per_epoch=10, epochs=10, verbose=2, callbacks=[ es, ms, csv_log, sg], workers=-1, use_multiprocessing=True, validation_data=test_ds, validation_steps=10 )
_____no_output_____
MIT
notebooks/Atari/Pacman_Colab/Transformative/Dense/AE/pacman_AE_Dense_reconst_ellwlb_episode_flat_working.ipynb
azeghost/Generative_Models
Model Evaluation inception_score
from evaluation.generativity_metrics.inception_metrics import inception_score is_mean, is_sigma = inception_score(ae, tolerance_threshold=1e-6, max_iteration=200) print(f'inception_score mean: {is_mean}, sigma: {is_sigma}')
_____no_output_____
MIT
notebooks/Atari/Pacman_Colab/Transformative/Dense/AE/pacman_AE_Dense_reconst_ellwlb_episode_flat_working.ipynb
azeghost/Generative_Models
Frechet_inception_distance
from evaluation.generativity_metrics.inception_metrics import frechet_inception_distance fis_score = frechet_inception_distance(ae, training_generator, tolerance_threshold=1e-6, max_iteration=10, batch_size=32) print(f'frechet inception distance: {fis_score}')
_____no_output_____
MIT
notebooks/Atari/Pacman_Colab/Transformative/Dense/AE/pacman_AE_Dense_reconst_ellwlb_episode_flat_working.ipynb
azeghost/Generative_Models
perceptual_path_length_score
from evaluation.generativity_metrics.perceptual_path_length import perceptual_path_length_score ppl_mean_score = perceptual_path_length_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200, batch_size=32) print(f'perceptual path length score: {ppl_mean_score}')
_____no_output_____
MIT
notebooks/Atari/Pacman_Colab/Transformative/Dense/AE/pacman_AE_Dense_reconst_ellwlb_episode_flat_working.ipynb
azeghost/Generative_Models
precision score
from evaluation.generativity_metrics.precision_recall import precision_score _precision_score = precision_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200) print(f'precision score: {_precision_score}')
_____no_output_____
MIT
notebooks/Atari/Pacman_Colab/Transformative/Dense/AE/pacman_AE_Dense_reconst_ellwlb_episode_flat_working.ipynb
azeghost/Generative_Models
recall score
from evaluation.generativity_metrics.precision_recall import recall_score _recall_score = recall_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200) print(f'recall score: {_recall_score}')
_____no_output_____
MIT
notebooks/Atari/Pacman_Colab/Transformative/Dense/AE/pacman_AE_Dense_reconst_ellwlb_episode_flat_working.ipynb
azeghost/Generative_Models
Image Generation image reconstruction Training dataset
%load_ext autoreload %autoreload 2 from training.generators.image_generation_testing import reconstruct_from_a_batch from utils.data_and_files.file_utils import create_if_not_exist save_dir = os.path.join(experiments_dir, 'reconstruct_training_images_like_a_batch_dir') create_if_not_exist(save_dir) reconstruct_from_a_batch(ae, training_generator, save_dir) from utils.data_and_files.file_utils import create_if_not_exist save_dir = os.path.join(experiments_dir, 'reconstruct_testing_images_like_a_batch_dir') create_if_not_exist(save_dir) reconstruct_from_a_batch(ae, testing_generator, save_dir)
_____no_output_____
MIT
notebooks/Atari/Pacman_Colab/Transformative/Dense/AE/pacman_AE_Dense_reconst_ellwlb_episode_flat_working.ipynb
azeghost/Generative_Models
with Randomness
from training.generators.image_generation_testing import generate_images_like_a_batch from utils.data_and_files.file_utils import create_if_not_exist save_dir = os.path.join(experiments_dir, 'generate_training_images_like_a_batch_dir') create_if_not_exist(save_dir) generate_images_like_a_batch(ae, training_generator, save_dir) from utils.data_and_files.file_utils import create_if_not_exist save_dir = os.path.join(experiments_dir, 'generate_testing_images_like_a_batch_dir') create_if_not_exist(save_dir) generate_images_like_a_batch(ae, testing_generator, save_dir)
_____no_output_____
MIT
notebooks/Atari/Pacman_Colab/Transformative/Dense/AE/pacman_AE_Dense_reconst_ellwlb_episode_flat_working.ipynb
azeghost/Generative_Models
Complete Randomness
from training.generators.image_generation_testing import generate_images_randomly from utils.data_and_files.file_utils import create_if_not_exist save_dir = os.path.join(experiments_dir, 'random_synthetic_dir') create_if_not_exist(save_dir) generate_images_randomly(ae, testing_generator, save_dir)
_____no_output_____
MIT
notebooks/Atari/Pacman_Colab/Transformative/Dense/AE/pacman_AE_Dense_reconst_ellwlb_episode_flat_working.ipynb
azeghost/Generative_Models
Stacked inputs outputs and predictions
from training.generators.image_generation_testing import predict_from_a_batch from utils.data_and_files.file_utils import create_if_not_exist save_dir = os.path.join(experiments_dir, 'predictions') create_if_not_exist(save_dir) predict_from_a_batch(ae, testing_generator, save_dir)
_____no_output_____
MIT
notebooks/Atari/Pacman_Colab/Transformative/Dense/AE/pacman_AE_Dense_reconst_ellwlb_episode_flat_working.ipynb
azeghost/Generative_Models
DS106 Machine Learning : Lesson Nine Companion Notebook Table of Contents * [Table of Contents](DS106L9_toc) * [Page 1 - Introduction](DS106L9_page_1) * [Page 2 - What are Bayesian Statistics?](DS106L9_page_2) * [Page 3 - Bayes Theorem](DS106L9_page_3) * [Page 4 - Parts of Bayes Theorem](DS106L9_page_4) * [Page 5 - A/B Testing](DS106L9_page_5) * [Page 6 - Bayesian Network Basics](DS106L9_page_6) * [Page 7 - Key Terms](DS106L9_page_7) * [Page 8 - Lesson 4 Practice Hands-On](DS106L9_page_8) * [Page 9 - Lesson 4 Practice Hands-On Solution](DS106L9_page_9) Page 1 - Overview of this Module[Back to Top](DS106L9_toc)
from IPython.display import VimeoVideo # Tutorial Video Name: Bayesian Networks VimeoVideo('388131444', width=720, height=480)
_____no_output_____
MIT
Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/RECAP_DS/05_MACHINE_LEARNING/ML/ML04.ipynb
okara83/Becoming-a-Data-Scientist
Dimensionality Reduction
from sklearn.decomposition import PCA
_____no_output_____
MIT
Practical-06-2. Exploration.ipynb
kingsgeocomp/applied_gsa
Principal Components Analysis
o_dir = os.path.join('outputs','pca') if os.path.isdir(o_dir) is not True: print("Creating '{0}' directory.".format(o_dir)) os.mkdir(o_dir) pca = PCA() # Use all Principal Components pca.fit(scdf) # Train model on all data pcdf = pd.DataFrame(pca.transform(scdf)) # Transform data using model for i in range(0,21): print("Amount of explained variance for component {0} is: {1:6.2f}%".format(i, pca.explained_variance_ratio_[i]*100)) print("The amount of explained variance of the SES score using each component is...") sns.lineplot(x=list(range(1,len(pca.explained_variance_ratio_)+1)), y=pca.explained_variance_ratio_) pca = PCA(n_components=11) pca.fit(scdf) scores = pd.DataFrame(pca.transform(scdf), index=scdf.index) scores.to_csv(os.path.join(o_dir,'Scores.csv.gz'), compression='gzip', index=True) # Adapted from https://stackoverflow.com/questions/22984335/recovering-features-names-of-explained-variance-ratio-in-pca-with-sklearn i = np.identity(scdf.shape[1]) # identity matrix coef = pca.transform(i) loadings = pd.DataFrame(coef, index=scdf.columns) loadings.to_csv(os.path.join(o_dir,'Loadings.csv.gz'), compression='gzip', index=True) print(scores.shape) scores.sample(5, random_state=42) print(loadings.shape) loadings.sample(5, random_state=42) odf = pd.DataFrame(columns=['Variable','Component Loading','Score']) for i in range(0,len(loadings.index)): row = loadings.iloc[i,:] for c in list(loadings.columns.values): d = {'Variable':loadings.index[i], 'Component Loading':c, 'Score':row[c]} odf = odf.append(d, ignore_index=True) g = sns.FacetGrid(odf, col="Variable", col_wrap=4, height=3, aspect=2.0, margin_titles=True, sharey=True) g = g.map(plt.plot, "Component Loading", "Score", marker=".")
_____no_output_____
MIT
Practical-06-2. Exploration.ipynb
kingsgeocomp/applied_gsa
What Have We Done?
sns.set_style('white') sns.jointplot(data=scores, x=0, y=1, kind='hex', height=8, ratio=8)
_____no_output_____
MIT
Practical-06-2. Exploration.ipynb
kingsgeocomp/applied_gsa
Create an Output Directory and Load the Data
o_dir = os.path.join('outputs','clusters-pca') if os.path.isdir(o_dir) is not True: print("Creating '{0}' directory.".format(o_dir)) os.mkdir(o_dir) score_df = pd.read_csv(os.path.join('outputs','pca','Scores.csv.gz')) score_df.rename(columns={'Unnamed: 0':'lsoacd'}, inplace=True) score_df.set_index('lsoacd', inplace=True) # Ensures that df is initialised but original scores remain accessible df = score_df.copy(deep=True) score_df.describe() score_df.sample(3, random_state=42)
_____no_output_____
MIT
Practical-06-2. Exploration.ipynb
kingsgeocomp/applied_gsa
Rescale the Loaded DataWe need this so that differences in the component scores don't cause the clustering algorithms to focus only on the 1st component.
scaler = preprocessing.MinMaxScaler() df[df.columns] = scaler.fit_transform(df[df.columns]) df.describe() df.sample(3, random_state=42)
_____no_output_____
MIT
Practical-06-2. Exploration.ipynb
kingsgeocomp/applied_gsa
The model
* Utility function: $u(c) = \log(c)$
* Deterministic income: $y = 1$
* Investment return: $p(r = 0.02) = 0.5$, $p(r = -0.02) = 0.5$

value iteration
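Reading the recursion off the value-iteration cell below, the Bellman equation being solved is (a sketch; $w$ is current wealth, $c$ consumption, and the two returns are equally likely):
$$V(w) = \max_{0 < c < w + y} \; \log(c) + \frac{\beta}{2}\Big[V\big((w + y - c)(1+r)\big) + V\big((w + y - c)(1-r)\big)\Big]$$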
# infinite horizon MDP problem %pylab inline import numpy as np from scipy.optimize import minimize def u(c): return np.log(c) # discounting factor beta = 0.95 # wealth level w_low = 0 w_high = 10 # interest rate r = 0.02 # deterministic income y = 1 # good state and bad state economy with equal probability 0.5 # with good investment return 0.05 or bad investment return -0.05 ws = np.linspace(0.001,10**(1/2),100)**2 Vs = np.zeros(100) Cs = np.zeros(100) # Value iteration for j in range(50): if j % 10 == 0: print(j) for i in range(len(ws)): w = ws[i] def obj(c): return -(u(c) + beta*(np.interp((y+w-c)*(1+r), ws, Vs) + np.interp((y+w-c)*(1-r), ws, Vs))/2) bounds = [(0.0001, y+w-0.0001)] res = minimize(obj, 0.0001, method='SLSQP', bounds=bounds) Cs[i] = res.x[0] Vs[i] = -res.fun plt.plot(ws,Vs) plt.plot(ws,Cs)
_____no_output_____
MIT
20210115/.ipynb_checkpoints/policyGradient -checkpoint.ipynb
dongxulee/lifeCycle
policy gradient Assume the policy form $\theta = (a, b, c, \sigma)$; then $\pi_\theta \sim N(\log(ax+b)+c, \sigma)$. Assume the initial values $a = 1$, $b = 1$, $c = 1$, $\sigma = 1$. The gradient ascent update is $$\theta_{k+1} = \theta_{k} + \alpha \nabla_\theta V(\pi_\theta)\big|_{\theta_k}$$
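A sketch of where the `newGrad` terms in the code below come from (writing $x$ for wealth and $u$ for the sampled consumption to avoid clashing with the parameter $c$): for the Gaussian policy with mean $\mu_\theta(x) = \log(ax+b)+c$,
$$\frac{\partial \log \pi_\theta(u \mid x)}{\partial a} = \frac{u-\mu_\theta(x)}{\sigma^2}\cdot\frac{x}{ax+b}, \qquad \frac{\partial \log \pi_\theta(u \mid x)}{\partial b} = \frac{u-\mu_\theta(x)}{\sigma^2}\cdot\frac{1}{ax+b}, \qquad \frac{\partial \log \pi_\theta(u \mid x)}{\partial c} = \frac{u-\mu_\theta(x)}{\sigma^2},$$
and the REINFORCE estimator multiplies these per-step terms by the discounted return of the simulated path before averaging over sampled paths.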
# simulation step T = 100 T = 10 def mu(theta, w): return np.log(theta[0] * w + theta[1]) + theta[2] def simSinglePath(theta): wPath = np.zeros(T) aPath = np.zeros(T) rPath = np.zeros(T) w = np.random.choice(ws) for t in range(T): c = np.random.normal(mu(theta, w), theta[3]) while c < 0.0001 or c > w+y-0.0001: c = np.random.normal(mu(theta, w), theta[3]) wPath[t] = w aPath[t] = c rPath[t] = np.log(c)*(beta**t) if np.random.uniform(0,1) > 0.5: w = (w+y-c) * (1+r) else: w = (w+y-c) * (1-r) return wPath, aPath, rPath def gradientV(theta, D = 100): ''' D is the sample size ''' grad = np.zeros(len(theta)) newGrad = np.zeros(len(theta)) for d in range(D): wp, ap, rp = simSinglePath(theta) newGrad[0] = np.sum((ap - mu(theta, wp))/(theta[3]**2)*(w/(theta[0]*w + theta[1]))) newGrad[1] = np.sum((ap - mu(theta, wp))/(theta[3]**2)*(1/(theta[0]*w + theta[1]))) newGrad[2] = np.sum((ap - mu(theta, wp))/(theta[3]**2)) #newGrad[3] = np.sum((((ap - mu(theta, wp))**2 - theta[3]**2)/(theta[3]**3))) grad += newGrad * np.sum(rp) grad /= D grad[-1] = 0 return grad def updateTheta(theta): theta = theta + alpha * gradientV(theta) return theta import time def plot(theta): def f(x): return np.log(theta[0]*x + theta[1]) + theta[2] plt.plot(ws, Cs, 'b') plt.plot(ws, f(ws), 'r') # c < 0 or c > w + 5, then reward -100 # initial theta theta = [1,1,1,0.1] # gradient ascend step size alpha = 0.001 # store theta THETA = np.zeros((3,10000)) for i in range(10000): theta = updateTheta(theta) THETA[:,i] = theta[:3] plot(theta) theta = [0.4, 1.00560229, 0.74852663, 0.1 ] plt.plot(THETA[0,:]) plt.plot(THETA[1,:]) plt.plot(THETA[2,:]) def V(theta, w, D = 100): def sPath(theta, w): wPath = np.zeros(T) aPath = np.zeros(T) rPath = np.zeros(T) for t in range(T): c = np.random.normal(mu(theta, w), theta[3]) while c < 0.0001 or c > w+y-0.0001: c = np.random.normal(mu(theta, w), theta[3]) wPath[t] = w aPath[t] = c rPath[t] = np.log(c)*(beta**t) if np.random.uniform(0,1) > 0.5: w = (w+y-c) * (1+r) else: w = (w+y-c) * (1-r) return wPath, aPath, rPath value = 0 for d in range(D): _,_,rp = sPath(theta,w) value += np.sum(rp) return value/D
_____no_output_____
MIT
20210115/.ipynb_checkpoints/policyGradient -checkpoint.ipynb
dongxulee/lifeCycle
Copyright 2019 The TensorFlow Authors.
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
_____no_output_____
Apache-2.0
site/en/tutorials/load_data/tfrecord.ipynb
blueyi/docs
TFRecord and tf.Example To read data efficiently it can be helpful to serialize your data and store it in a set of files (100-200MB each) that can each be read linearly. This is especially true if the data is being streamed over a network. This can also be useful for caching any data-preprocessing. The TFRecord format is a simple format for storing a sequence of binary records. [Protocol buffers](https://developers.google.com/protocol-buffers/) are a cross-platform, cross-language library for efficient serialization of structured data. Protocol messages are defined by `.proto` files; these are often the easiest way to understand a message type. The `tf.Example` message (or protobuf) is a flexible message type that represents a `{"string": value}` mapping. It is designed for use with TensorFlow and is used throughout the higher-level APIs such as [TFX](https://www.tensorflow.org/tfx/). This notebook will demonstrate how to create, parse, and use the `tf.Example` message, and then serialize, write, and read `tf.Example` messages to and from `.tfrecord` files. Note: While useful, these structures are optional. There is no need to convert existing code to use TFRecords, unless you are using [`tf.data`](https://www.tensorflow.org/guide/datasets) and reading data is still the bottleneck to training. See [Data Input Pipeline Performance](https://www.tensorflow.org/guide/performance/datasets) for dataset performance tips. Setup
!pip install tf-nightly import tensorflow as tf import numpy as np import IPython.display as display
_____no_output_____
Apache-2.0
site/en/tutorials/load_data/tfrecord.ipynb
blueyi/docs
`tf.Example` Data types for `tf.Example` Fundamentally, a `tf.Example` is a `{"string": tf.train.Feature}` mapping. The `tf.train.Feature` message type can accept one of the following three types (see the [`.proto` file](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/feature.proto) for reference). Most other generic types can be coerced into one of these:
1. `tf.train.BytesList` (the following types can be coerced)
   - `string`
   - `byte`
2. `tf.train.FloatList` (the following types can be coerced)
   - `float` (`float32`)
   - `double` (`float64`)
3. `tf.train.Int64List` (the following types can be coerced)
   - `bool`
   - `enum`
   - `int32`
   - `uint32`
   - `int64`
   - `uint64`
In order to convert a standard TensorFlow type to a `tf.Example`-compatible `tf.train.Feature`, you can use the shortcut functions below. Note that each function takes a scalar input value and returns a `tf.train.Feature` containing one of the three `list` types above:
# The following functions can be used to convert a value to a type compatible # with tf.Example. def _bytes_feature(value): """Returns a bytes_list from a string / byte.""" if isinstance(value, type(tf.constant(0))): value = value.numpy() # BytesList won't unpack a string from an EagerTensor. return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _float_feature(value): """Returns a float_list from a float / double.""" return tf.train.Feature(float_list=tf.train.FloatList(value=[value])) def _int64_feature(value): """Returns an int64_list from a bool / enum / int / uint.""" return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
_____no_output_____
Apache-2.0
site/en/tutorials/load_data/tfrecord.ipynb
blueyi/docs
Note: To stay simple, this example only uses scalar inputs. The simplest way to handle non-scalar features is to use `tf.serialize_tensor` to convert tensors to binary-strings. Strings are scalars in TensorFlow. Use `tf.parse_tensor` to convert the binary-string back to a tensor. Below are some examples of how these functions work. Note the varying input types and the standardized output types. If the input type for a function does not match one of the coercible types stated above, the function will raise an exception (e.g. `_int64_feature(1.0)` will error out; since `1.0` is a float, it should be used with the `_float_feature` function instead):
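As a small illustration of the non-scalar round trip mentioned above (using the `tf.io.serialize_tensor` / `tf.io.parse_tensor` spellings; the `out_type` passed to `parse_tensor` must match the original dtype):

t = tf.constant([[1.0, 2.0], [3.0, 4.0]])
serialized = tf.io.serialize_tensor(t)   # scalar string tensor, storable via _bytes_feature
restored = tf.io.parse_tensor(serialized, out_type=tf.float32)  # recovers the original 2x2 tensor
print(restored)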
print(_bytes_feature(b'test_string')) print(_bytes_feature(u'test_bytes'.encode('utf-8'))) print(_float_feature(np.exp(1))) print(_int64_feature(True)) print(_int64_feature(1))
_____no_output_____
Apache-2.0
site/en/tutorials/load_data/tfrecord.ipynb
blueyi/docs
All proto messages can be serialized to a binary-string using the `.SerializeToString` method:
feature = _float_feature(np.exp(1)) feature.SerializeToString()
_____no_output_____
Apache-2.0
site/en/tutorials/load_data/tfrecord.ipynb
blueyi/docs
Creating a `tf.Example` message Suppose you want to create a `tf.Example` message from existing data. In practice, the dataset may come from anywhere, but the procedure of creating the `tf.Example` message from a single observation will be the same:
1. Within each observation, each value needs to be converted to a `tf.train.Feature` containing one of the 3 compatible types, using one of the functions above.
2. You create a map (dictionary) from the feature name string to the encoded feature value produced in step 1.
3. The map produced in step 2 is converted to a [`Features` message](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/feature.proto#L85).
In this notebook, you will create a dataset using NumPy. This dataset will have 4 features:
* a boolean feature, `False` or `True` with equal probability
* an integer feature uniformly randomly chosen from `[0, 5]`
* a string feature generated from a string table by using the integer feature as an index
* a float feature from a standard normal distribution
Consider a sample consisting of 10,000 independently and identically distributed observations from each of the above distributions:
# The number of observations in the dataset. n_observations = int(1e4) # Boolean feature, encoded as False or True. feature0 = np.random.choice([False, True], n_observations) # Integer feature, random from 0 to 4. feature1 = np.random.randint(0, 5, n_observations) # String feature strings = np.array([b'cat', b'dog', b'chicken', b'horse', b'goat']) feature2 = strings[feature1] # Float feature, from a standard normal distribution feature3 = np.random.randn(n_observations)
_____no_output_____
Apache-2.0
site/en/tutorials/load_data/tfrecord.ipynb
blueyi/docs
Each of these features can be coerced into a `tf.Example`-compatible type using one of `_bytes_feature`, `_float_feature`, `_int64_feature`. You can then create a `tf.Example` message from these encoded features:
def serialize_example(feature0, feature1, feature2, feature3): """ Creates a tf.Example message ready to be written to a file. """ # Create a dictionary mapping the feature name to the tf.Example-compatible # data type. feature = { 'feature0': _int64_feature(feature0), 'feature1': _int64_feature(feature1), 'feature2': _bytes_feature(feature2), 'feature3': _float_feature(feature3), } # Create a Features message using tf.train.Example. example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString()
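As an illustrative usage of the function above (the argument values here are arbitrary placeholders, not taken from the arrays built earlier):

serialized_example = serialize_example(False, 4, b'goat', 0.9876)
serialized_example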
_____no_output_____
Apache-2.0
site/en/tutorials/load_data/tfrecord.ipynb
blueyi/docs