Dataset columns: markdown (stringlengths 0–1.02M), code (stringlengths 0–832k), output (stringlengths 0–1.02M), license (stringlengths 3–36), path (stringlengths 6–265), repo_name (stringlengths 6–127)
Theano
!pip install numpy matplotlib !pip install --upgrade https://github.com/Theano/Theano/archive/master.zip !pip install --upgrade https://github.com/Lasagne/Lasagne/archive/master.zip
_____no_output_____
Apache-2.0
sem2-classify&generate/1_my_first_nn_lsagne.ipynb
bayesgroup/deepbayes2017
Warm-up
import theano import theano.tensor as T %pylab inline
_____no_output_____
Apache-2.0
sem2-classify&generate/1_my_first_nn_lsagne.ipynb
bayesgroup/deepbayes2017
The future function argument is a symbolic variable
N = T.scalar('a dimension', dtype='float32')
_____no_output_____
Apache-2.0
sem2-classify&generate/1_my_first_nn_lsagne.ipynb
bayesgroup/deepbayes2017
The recipe for computing the square: operations on symbolic variables
result = T.power(N, 2)
_____no_output_____
Apache-2.0
sem2-classify&generate/1_my_first_nn_lsagne.ipynb
bayesgroup/deepbayes2017
theano.grad(cost, wrt)
grad_result = theano.grad(result, N)
_____no_output_____
Apache-2.0
sem2-classify&generate/1_my_first_nn_lsagne.ipynb
bayesgroup/deepbayes2017
Compiling the "squaring" function
sq_function = theano.function(inputs=[N], outputs=result) gr_function = theano.function(inputs=[N], outputs=grad_result)
_____no_output_____
Apache-2.0
sem2-classify&generate/1_my_first_nn_lsagne.ipynb
bayesgroup/deepbayes2017
Applying the function
# Create an np.array of x values xv = np.arange(-10, 10) # Apply the function to every x val = [float(sq_function(x)) for x in xv] # Compute the gradient at every point grad = [float(gr_function(x)) for x in xv]
_____no_output_____
Apache-2.0
sem2-classify&generate/1_my_first_nn_lsagne.ipynb
bayesgroup/deepbayes2017
What will we see if we plot the function and its gradient?
pylab.plot(xv, val, label='x*x') pylab.plot(xv, grad, label='d x*x / dx') pylab.legend()
_____no_output_____
Apache-2.0
sem2-classify&generate/1_my_first_nn_lsagne.ipynb
bayesgroup/deepbayes2017
Lasagne* lasagne is a library for building neural networks of arbitrary shape on top of theano* As a demo task we take the same digit-recognition problem, but at a larger scale: 28x28 images, 10 digits
from mnist import load_dataset X_train, y_train, X_val, y_val, X_test, y_test = load_dataset() print('X shape', X_train.shape, 'y shape', y_train.shape) fig, axes = plt.subplots(nrows=1, ncols=7, figsize=(20, 20)) for i, ax in enumerate(axes): ax.imshow(X_train[i, 0], cmap='gray')
_____no_output_____
Apache-2.0
sem2-classify&generate/1_my_first_nn_lsagne.ipynb
bayesgroup/deepbayes2017
Let's look at the DenseLayer in lasagne- http://lasagne.readthedocs.io/en/latest/modules/layers/dense.html- https://github.com/Lasagne/Lasagne/blob/master/lasagne/layers/dense.py#L16-L124 - All of the substantive code is here: https://github.com/Lasagne/Lasagne/blob/master/lasagne/layers/dense.py#L121
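As a minimal illustration of what a DenseLayer computes, nonlinearity(W·x + b) over the flattened incoming layer, here is a small self-contained sketch; the 784-unit input, the 10-unit output, and the random batch are illustrative choices only, not values from this notebook.

```python
# Minimal DenseLayer sketch: a dense layer computes nonlinearity(x.dot(W) + b).
# The 784-unit input and 10-unit output below are illustrative choices.
import numpy as np
import theano
import theano.tensor as T
import lasagne
from lasagne.nonlinearities import softmax

x = T.matrix('x')                                                # batch of flattened images
l_in = lasagne.layers.InputLayer(shape=(None, 784), input_var=x)
l_out = lasagne.layers.DenseLayer(l_in, num_units=10, nonlinearity=softmax)

probs = lasagne.layers.get_output(l_out)                         # symbolic forward pass
predict = theano.function([x], probs)

print(predict(np.random.rand(2, 784).astype('float32')).shape)   # (2, 10)
```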
import lasagne from lasagne import init from theano import tensor as T from lasagne.nonlinearities import softmax X, y = T.tensor4('X'), T.vector('y', 'int32')
_____no_output_____
Apache-2.0
sem2-classify&generate/1_my_first_nn_lsagne.ipynb
bayesgroup/deepbayes2017
This is how the network architecture is defined
# input layer (auxiliary) net = lasagne.layers.InputLayer(shape=(None, 1, 28, 28), input_var=X) net = lasagne.layers.Conv2DLayer(net, 15, 28, pad='valid', W=init.Constant()) # convolutional layer net = lasagne.layers.Conv2DLayer(net, 10, 2, pad='full', W=init.Constant()) # convolutional layer net = lasagne.layers.DenseLayer(net, num_units=500) # fully connected layer net = lasagne.layers.DropoutLayer(net, 0.5) # regularizer net = lasagne.layers.DenseLayer(net, num_units=200) # fully connected layer net = lasagne.layers.DenseLayer(net, num_units=10, nonlinearity=softmax) # fully connected output layer # network prediction (a theano transformation) y_predicted = lasagne.layers.get_output(net) # all network weights (shared variables) all_weights = lasagne.layers.get_all_params(net) print(all_weights) # the loss function and accuracy are defined right here loss = lasagne.objectives.categorical_crossentropy(y_predicted, y).mean() accuracy = lasagne.objectives.categorical_accuracy(y_predicted, y).mean() # immediately build the dictionary of values updated by a gradient step, as before updates = lasagne.updates.momentum(loss, all_weights, learning_rate=0.01, momentum=0.9) # a function that applies the updates and returns the loss and accuracy train_fun = theano.function([X, y], [loss, accuracy], updates=updates) accuracy_fun = theano.function([X, y], accuracy) # accuracy without weight updates, for evaluation
_____no_output_____
Apache-2.0
sem2-classify&generate/1_my_first_nn_lsagne.ipynb
bayesgroup/deepbayes2017
The training process
import time from mnist import iterate_minibatches num_epochs = 5 # number of passes over the data batch_size = 50 # mini-batch size for epoch in range(num_epochs): train_err, train_acc, train_batches, start_time = 0, 0, 0, time.time() for inputs, targets in iterate_minibatches(X_train, y_train, batch_size): train_err_batch, train_acc_batch = train_fun(inputs, targets) train_err += train_err_batch train_acc += train_acc_batch train_batches += 1 val_acc, val_batches = 0, 0 for inputs, targets in iterate_minibatches(X_test, y_test, batch_size): val_acc += accuracy_fun(inputs, targets) val_batches += 1 print("Epoch %s of %s took %.3f s" % (epoch + 1, num_epochs, time.time() - start_time)) print(" train loss:\t %.3f" % (train_err / train_batches)) print(" train acc:\t %.3f %%" % (train_acc * 100 / train_batches)) print(" test acc:\t %.3f %%" % (val_acc * 100 / val_batches)) print() test_acc = 0 test_batches = 0 for batch in iterate_minibatches(X_test, y_test, 500): inputs, targets = batch acc = accuracy_fun(inputs, targets) test_acc += acc test_batches += 1 print("Final results: \n test accuracy:\t\t{:.2f} %".format(test_acc / test_batches * 100))
_____no_output_____
Apache-2.0
sem2-classify&generate/1_my_first_nn_lsagne.ipynb
bayesgroup/deepbayes2017
Ensembling with dropout
# network prediction (a theano transformation) y_predicted = T.mean([lasagne.layers.get_output(net, deterministic=False) for i in range(10)], axis=0) accuracy = lasagne.objectives.categorical_accuracy(y_predicted, y).mean() accuracy_fun = theano.function([X, y], accuracy) # accuracy without weight updates, for evaluation test_acc = 0 test_batches = 0 for batch in iterate_minibatches(X_test, y_test, 500): inputs, targets = batch acc = accuracy_fun(inputs, targets) test_acc += acc test_batches += 1 print("Final results: \n test accuracy:\t\t{:.2f} %".format(test_acc / test_batches * 100))
_____no_output_____
Apache-2.0
sem2-classify&generate/1_my_first_nn_lsagne.ipynb
bayesgroup/deepbayes2017
Check the current GPU information
from tensorflow.python.client import device_lib device_lib.list_local_devices() !pip install bert-tensorflow import pandas as pd import tensorflow as tf import tensorflow_hub as hub import pickle import bert from bert import run_classifier from bert import optimization from bert import tokenization def pretty_print(result): df = pd.DataFrame([result]).T df.columns = ["values"] return df def create_tokenizer_from_hub_module(bert_model_hub): """Get the vocab file and casing info from the Hub module.""" with tf.Graph().as_default(): bert_module = hub.Module(bert_model_hub) tokenization_info = bert_module(signature="tokenization_info", as_dict=True) with tf.Session() as sess: vocab_file, do_lower_case = sess.run([tokenization_info["vocab_file"], tokenization_info["do_lower_case"]]) return bert.tokenization.FullTokenizer( vocab_file=vocab_file, do_lower_case=do_lower_case) def make_features(dataset, label_list, MAX_SEQ_LENGTH, tokenizer, DATA_COLUMN, LABEL_COLUMN): input_example = dataset.apply(lambda x: bert.run_classifier.InputExample(guid=None, text_a = x[DATA_COLUMN], text_b = None, label = x[LABEL_COLUMN]), axis = 1) features = bert.run_classifier.convert_examples_to_features(input_example, label_list, MAX_SEQ_LENGTH, tokenizer) return features def create_model(bert_model_hub, is_predicting, input_ids, input_mask, segment_ids, labels, num_labels): """Creates a classification model.""" bert_module = hub.Module( bert_model_hub, trainable=True) bert_inputs = dict( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids) bert_outputs = bert_module( inputs=bert_inputs, signature="tokens", as_dict=True) # Use "pooled_output" for classification tasks on an entire sentence. # Use "sequence_outputs" for token-level output. output_layer = bert_outputs["pooled_output"] hidden_size = output_layer.shape[-1].value # Create our own layer to tune for politeness data. output_weights = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): # Dropout helps prevent overfitting output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) # Convert labels into one-hot encoding one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) predicted_labels = tf.squeeze(tf.argmax(log_probs, axis=-1, output_type=tf.int32)) # If we're predicting, we want predicted labels and the probabiltiies. if is_predicting: return (predicted_labels, log_probs) # If we're train/eval, compute loss between predicted and actual label per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, predicted_labels, log_probs) # model_fn_builder actually creates our model function # using the passed parameters for num_labels, learning_rate, etc. 
def model_fn_builder(bert_model_hub, num_labels, learning_rate, num_train_steps, num_warmup_steps): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_predicting = (mode == tf.estimator.ModeKeys.PREDICT) # TRAIN and EVAL if not is_predicting: (loss, predicted_labels, log_probs) = create_model( bert_model_hub, is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels) train_op = bert.optimization.create_optimizer( loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False) # Calculate evaluation metrics. def metric_fn(label_ids, predicted_labels): accuracy = tf.metrics.accuracy(label_ids, predicted_labels) f1_score = tf.contrib.metrics.f1_score( label_ids, predicted_labels) auc = tf.metrics.auc( label_ids, predicted_labels) recall = tf.metrics.recall( label_ids, predicted_labels) precision = tf.metrics.precision( label_ids, predicted_labels) true_pos = tf.metrics.true_positives( label_ids, predicted_labels) true_neg = tf.metrics.true_negatives( label_ids, predicted_labels) false_pos = tf.metrics.false_positives( label_ids, predicted_labels) false_neg = tf.metrics.false_negatives( label_ids, predicted_labels) return { "eval_accuracy": accuracy, "f1_score": f1_score, "auc": auc, "precision": precision, "recall": recall, "true_positives": true_pos, "true_negatives": true_neg, "false_positives": false_pos, "false_negatives": false_neg } eval_metrics = metric_fn(label_ids, predicted_labels) if mode == tf.estimator.ModeKeys.TRAIN: return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op) else: return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics) else: (predicted_labels, log_probs) = create_model( bert_model_hub, is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels) predictions = { 'probabilities': log_probs, 'labels': predicted_labels } return tf.estimator.EstimatorSpec(mode, predictions=predictions) # Return the actual model function in the closure return model_fn def estimator_builder(bert_model_hub, OUTPUT_DIR, SAVE_SUMMARY_STEPS, SAVE_CHECKPOINTS_STEPS, label_list, LEARNING_RATE, num_train_steps, num_warmup_steps, BATCH_SIZE): # Specify outpit directory and number of checkpoint steps to save run_config = tf.estimator.RunConfig( model_dir=OUTPUT_DIR, save_summary_steps=SAVE_SUMMARY_STEPS, save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS) model_fn = model_fn_builder( bert_model_hub = bert_model_hub, num_labels=len(label_list), learning_rate=LEARNING_RATE, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps) estimator = tf.estimator.Estimator( model_fn=model_fn, config=run_config, params={"batch_size": BATCH_SIZE}) return estimator, model_fn, run_config def run_on_dfs(train, test, DATA_COLUMN, LABEL_COLUMN, MAX_SEQ_LENGTH = 128, BATCH_SIZE = 32, LEARNING_RATE = 2e-5, NUM_TRAIN_EPOCHS = 3.0, WARMUP_PROPORTION = 0.1, SAVE_SUMMARY_STEPS = 100, SAVE_CHECKPOINTS_STEPS = 10000, bert_model_hub = "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1"): label_list = train[LABEL_COLUMN].unique().tolist() tokenizer = create_tokenizer_from_hub_module(bert_model_hub) train_features = make_features(train, label_list, MAX_SEQ_LENGTH, tokenizer, DATA_COLUMN, LABEL_COLUMN) test_features = make_features(test, label_list, MAX_SEQ_LENGTH, 
tokenizer, DATA_COLUMN, LABEL_COLUMN) num_train_steps = int(len(train_features) / BATCH_SIZE * NUM_TRAIN_EPOCHS) num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION) estimator, model_fn, run_config = estimator_builder( bert_model_hub, OUTPUT_DIR, SAVE_SUMMARY_STEPS, SAVE_CHECKPOINTS_STEPS, label_list, LEARNING_RATE, num_train_steps, num_warmup_steps, BATCH_SIZE) train_input_fn = bert.run_classifier.input_fn_builder( features=train_features, seq_length=MAX_SEQ_LENGTH, is_training=True, drop_remainder=False) estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) test_input_fn = run_classifier.input_fn_builder( features=test_features, seq_length=MAX_SEQ_LENGTH, is_training=False, drop_remainder=False) result_dict = estimator.evaluate(input_fn=test_input_fn, steps=None) return result_dict, estimator import random random.seed(10) OUTPUT_DIR = 'output'
_____no_output_____
MIT
spam_message/spam_massage_with_bert.ipynb
yaoyue123/SocialComputing
----- Only the code below needs to be changed ----- Import the dataset
!wget https://github.com/yaoyue123/SocialComputing/raw/master/spam_message/training.txt !wget https://github.com/yaoyue123/SocialComputing/raw/master/spam_message/validation.txt train = pd.read_table("training.txt",sep='\t',error_bad_lines=False) #mytrain= mytrain[order] test = pd.read_table("validation.txt",sep='\t',error_bad_lines=False) #mytest= mytest[order] train.head() test.head()
_____no_output_____
MIT
spam_message/spam_massage_with_bert.ipynb
yaoyue123/SocialComputing
Change your parameters here, such as the label column, the BERT model hub address, and the number of epochs
myparam = { "DATA_COLUMN": "massage", "LABEL_COLUMN": "label", "LEARNING_RATE": 2e-5, "NUM_TRAIN_EPOCHS":1, "bert_model_hub":"https://tfhub.dev/google/bert_chinese_L-12_H-768_A-12/1" }
_____no_output_____
MIT
spam_message/spam_massage_with_bert.ipynb
yaoyue123/SocialComputing
Train the model. Typically, one epoch takes roughly 10 minutes on a K80
result, estimator = run_on_dfs(train, test, **myparam)
INFO:tensorflow:Saver not created because there are no variables in the graph to restore
MIT
spam_message/spam_massage_with_bert.ipynb
yaoyue123/SocialComputing
The BERT model is quite strong: a single epoch already reaches 99% accuracy
pretty_print(result)
_____no_output_____
MIT
spam_message/spam_massage_with_bert.ipynb
yaoyue123/SocialComputing
Anna KaRNNa. In this notebook, I'll build a character-wise RNN trained on Anna Karenina, one of my all-time favorite books. It'll be able to generate new text based on the text from the book. This network is based off of Andrej Karpathy's [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Also, some information [here at r2rt](http://r2rt.com/recurrent-neural-networks-in-tensorflow-ii.html) and from [Sherjil Ozair](https://github.com/sherjilozair/char-rnn-tensorflow) on GitHub. Below is the general architecture of the character-wise RNN.
import time from collections import namedtuple import numpy as np import tensorflow as tf
_____no_output_____
MIT
tensorboard/Anna_KaRNNa.ipynb
smrutiranjans/deep-learning
First we'll load the text file and convert it into integers for our network to use.
with open('anna.txt', 'r') as f: text=f.read() vocab = set(text) vocab_to_int = {c: i for i, c in enumerate(vocab)} int_to_vocab = dict(enumerate(vocab)) chars = np.array([vocab_to_int[c] for c in text], dtype=np.int32) text[:100] chars[:100]
_____no_output_____
MIT
tensorboard/Anna_KaRNNa.ipynb
smrutiranjans/deep-learning
Now I need to split up the data into batches, and into training and validation sets. I should be making a test set here, but I'm not going to worry about that. My test will be if the network can generate new text. Here I'll make both input and target arrays. The targets are the same as the inputs, except shifted one character over. I'll also drop the last bit of data so that I'll only have completely full batches. The idea here is to make a 2D matrix where the number of rows is equal to the number of batches. Each row will be one long concatenated string from the character data. We'll split this data into a training set and validation set using the `split_frac` keyword. This will keep 90% of the batches in the training set, the other 10% in the validation set.
def split_data(chars, batch_size, num_steps, split_frac=0.9): """ Split character data into training and validation sets, inputs and targets for each set. Arguments --------- chars: character array batch_size: Size of examples in each of batch num_steps: Number of sequence steps to keep in the input and pass to the network split_frac: Fraction of batches to keep in the training set Returns train_x, train_y, val_x, val_y """ slice_size = batch_size * num_steps n_batches = int(len(chars) / slice_size) # Drop the last few characters to make only full batches x = chars[: n_batches*slice_size] y = chars[1: n_batches*slice_size + 1] # Split the data into batch_size slices, then stack them into a 2D matrix x = np.stack(np.split(x, batch_size)) y = np.stack(np.split(y, batch_size)) # Now x and y are arrays with dimensions batch_size x n_batches*num_steps # Split into training and validation sets, keep the virst split_frac batches for training split_idx = int(n_batches*split_frac) train_x, train_y= x[:, :split_idx*num_steps], y[:, :split_idx*num_steps] val_x, val_y = x[:, split_idx*num_steps:], y[:, split_idx*num_steps:] return train_x, train_y, val_x, val_y train_x, train_y, val_x, val_y = split_data(chars, 10, 200) train_x.shape train_x[:,:10]
_____no_output_____
MIT
tensorboard/Anna_KaRNNa.ipynb
smrutiranjans/deep-learning
I'll write another function to grab batches out of the arrays made by `split_data`. Here each batch will be a sliding window on these arrays with size `batch_size X num_steps`. For example, if we want our network to train on a sequence of 100 characters, `num_steps = 100`. For the next batch, we'll shift this window over to the next sequence of `num_steps` characters. In this way we can feed batches to the network and the cell states will continue through on each batch.
def get_batch(arrs, num_steps): batch_size, slice_size = arrs[0].shape n_batches = int(slice_size/num_steps) for b in range(n_batches): yield [x[:, b*num_steps: (b+1)*num_steps] for x in arrs] def build_rnn(num_classes, batch_size=50, num_steps=50, lstm_size=128, num_layers=2, learning_rate=0.001, grad_clip=5, sampling=False): if sampling == True: batch_size, num_steps = 1, 1 tf.reset_default_graph() # Declare placeholders we'll feed into the graph inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs') x_one_hot = tf.one_hot(inputs, num_classes, name='x_one_hot') targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets') y_one_hot = tf.one_hot(targets, num_classes, name='y_one_hot') y_reshaped = tf.reshape(y_one_hot, [-1, num_classes]) keep_prob = tf.placeholder(tf.float32, name='keep_prob') # Build the RNN layers lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size) drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob) cell = tf.contrib.rnn.MultiRNNCell([drop] * num_layers) initial_state = cell.zero_state(batch_size, tf.float32) # Run the data through the RNN layers outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=initial_state) final_state = state # Reshape output so it's a bunch of rows, one row for each cell output seq_output = tf.concat(outputs, axis=1,name='seq_output') output = tf.reshape(seq_output, [-1, lstm_size], name='graph_output') # Now connect the RNN putputs to a softmax layer and calculate the cost softmax_w = tf.Variable(tf.truncated_normal((lstm_size, num_classes), stddev=0.1), name='softmax_w') softmax_b = tf.Variable(tf.zeros(num_classes), name='softmax_b') logits = tf.matmul(output, softmax_w) + softmax_b preds = tf.nn.softmax(logits, name='predictions') loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped, name='loss') cost = tf.reduce_mean(loss, name='cost') # Optimizer for training, using gradient clipping to control exploding gradients tvars = tf.trainable_variables() grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip) train_op = tf.train.AdamOptimizer(learning_rate) optimizer = train_op.apply_gradients(zip(grads, tvars)) # Export the nodes export_nodes = ['inputs', 'targets', 'initial_state', 'final_state', 'keep_prob', 'cost', 'preds', 'optimizer'] Graph = namedtuple('Graph', export_nodes) local_dict = locals() graph = Graph(*[local_dict[each] for each in export_nodes]) return graph
_____no_output_____
MIT
tensorboard/Anna_KaRNNa.ipynb
smrutiranjans/deep-learning
Hyperparameters. Here I'm defining the hyperparameters for the network. The two you probably haven't seen before are `lstm_size` and `num_layers`. These set the number of hidden units in the LSTM layers and the number of LSTM layers, respectively. Of course, making these bigger will improve the network's performance but you'll have to watch out for overfitting. If your validation loss is much larger than the training loss, you're probably overfitting. Decrease the size of the network or decrease the dropout keep probability.
batch_size = 100 num_steps = 100 lstm_size = 512 num_layers = 2 learning_rate = 0.001
_____no_output_____
MIT
tensorboard/Anna_KaRNNa.ipynb
smrutiranjans/deep-learning
Write out the graph for TensorBoard
model = build_rnn(len(vocab), batch_size=batch_size, num_steps=num_steps, learning_rate=learning_rate, lstm_size=lstm_size, num_layers=num_layers) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) file_writer = tf.summary.FileWriter('./logs/1', sess.graph)
_____no_output_____
MIT
tensorboard/Anna_KaRNNa.ipynb
smrutiranjans/deep-learning
Training. Time for training, which is pretty straightforward. Here I pass in some data, and get an LSTM state back. Then I pass that state back in to the network so the next batch can continue the state from the previous batch. And every so often (set by `save_every_n`) I calculate the validation loss and save a checkpoint.
!mkdir -p checkpoints/anna epochs = 1 save_every_n = 200 train_x, train_y, val_x, val_y = split_data(chars, batch_size, num_steps) model = build_rnn(len(vocab), batch_size=batch_size, num_steps=num_steps, learning_rate=learning_rate, lstm_size=lstm_size, num_layers=num_layers) saver = tf.train.Saver(max_to_keep=100) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) # Use the line below to load a checkpoint and resume training #saver.restore(sess, 'checkpoints/anna20.ckpt') n_batches = int(train_x.shape[1]/num_steps) iterations = n_batches * epochs for e in range(epochs): # Train network new_state = sess.run(model.initial_state) loss = 0 for b, (x, y) in enumerate(get_batch([train_x, train_y], num_steps), 1): iteration = e*n_batches + b start = time.time() feed = {model.inputs: x, model.targets: y, model.keep_prob: 0.5, model.initial_state: new_state} batch_loss, new_state, _ = sess.run([model.cost, model.final_state, model.optimizer], feed_dict=feed) loss += batch_loss end = time.time() print('Epoch {}/{} '.format(e+1, epochs), 'Iteration {}/{}'.format(iteration, iterations), 'Training loss: {:.4f}'.format(loss/b), '{:.4f} sec/batch'.format((end-start))) if (iteration%save_every_n == 0) or (iteration == iterations): # Check performance, notice dropout has been set to 1 val_loss = [] new_state = sess.run(model.initial_state) for x, y in get_batch([val_x, val_y], num_steps): feed = {model.inputs: x, model.targets: y, model.keep_prob: 1., model.initial_state: new_state} batch_loss, new_state = sess.run([model.cost, model.final_state], feed_dict=feed) val_loss.append(batch_loss) print('Validation loss:', np.mean(val_loss), 'Saving checkpoint!') saver.save(sess, "checkpoints/anna/i{}_l{}_{:.3f}.ckpt".format(iteration, lstm_size, np.mean(val_loss))) tf.train.get_checkpoint_state('checkpoints/anna')
_____no_output_____
MIT
tensorboard/Anna_KaRNNa.ipynb
smrutiranjans/deep-learning
Sampling. Now that the network is trained, we can use it to generate new text. The idea is that we pass in a character, then the network will predict the next character. We can use the new one to predict the next one, and we keep doing this to generate all new text. I also included some functionality to prime the network with some text by passing in a string and building up a state from that. The network gives us predictions for each character. To reduce noise and make things a little less random, I'm going to only choose a new character from the top N most likely characters.
def pick_top_n(preds, vocab_size, top_n=5): p = np.squeeze(preds) p[np.argsort(p)[:-top_n]] = 0 p = p / np.sum(p) c = np.random.choice(vocab_size, 1, p=p)[0] return c def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "): prime = "Far" samples = [c for c in prime] model = build_rnn(vocab_size, lstm_size=lstm_size, sampling=True) saver = tf.train.Saver() with tf.Session() as sess: saver.restore(sess, checkpoint) new_state = sess.run(model.initial_state) for c in prime: x = np.zeros((1, 1)) x[0,0] = vocab_to_int[c] feed = {model.inputs: x, model.keep_prob: 1., model.initial_state: new_state} preds, new_state = sess.run([model.preds, model.final_state], feed_dict=feed) c = pick_top_n(preds, len(vocab)) samples.append(int_to_vocab[c]) for i in range(n_samples): x[0,0] = c feed = {model.inputs: x, model.keep_prob: 1., model.initial_state: new_state} preds, new_state = sess.run([model.preds, model.final_state], feed_dict=feed) c = pick_top_n(preds, len(vocab)) samples.append(int_to_vocab[c]) return ''.join(samples) checkpoint = "checkpoints/anna/i3560_l512_1.122.ckpt" samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far") print(samp) checkpoint = "checkpoints/anna/i200_l512_2.432.ckpt" samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far") print(samp) checkpoint = "checkpoints/anna/i600_l512_1.750.ckpt" samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far") print(samp) checkpoint = "checkpoints/anna/i1000_l512_1.484.ckpt" samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far") print(samp)
Farrat, his felt has at it. "When the pose ther hor exceed to his sheant was," weat a sime of his sounsed. The coment and the facily that which had began terede a marilicaly whice whether the pose of his hand, at she was alligated herself the same on she had to taiking to his forthing and streath how to hand began in a lang at some at it, this he cholded not set all her. "Wo love that is setthing. Him anstering as seen that." "Yes in the man that say the mare a crances is it?" said Sergazy Ivancatching. "You doon think were somether is ifficult of a mone of though the most at the countes that the mean on the come to say the most, to his feesing of a man she, whilo he sained and well, that he would still at to said. He wind at his for the sore in the most of hoss and almoved to see him. They have betine the sumper into at he his stire, and what he was that at the so steate of the sound, and shin should have a geest of shall feet on the conderation to she had been at that imporsing the dre
MIT
tensorboard/Anna_KaRNNa.ipynb
smrutiranjans/deep-learning
Numpy Basic operations
import numpy as np numbers = [1, 2, 3, 4, 5] print(np.mean(numbers)) print(np.median(numbers)) print(np.std(numbers)) #This is the standard deviation
3.0 3.0 1.41421356237
Unlicense
Numpy/Introduction to Numpy.ipynb
PhillipWongSeven/Machine-Learning-Simplified
NumPy arrays are optimized to run faster
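As a rough illustration of that speed difference, here is a minimal timing sketch; the array size and the use of `timeit` are illustrative choices, not part of the original notebook. It sums the same million numbers with a plain Python `sum` over a list and with NumPy's vectorized `sum`.

```python
# Compare a pure-Python sum with NumPy's vectorized sum on the same data.
# The array size (1,000,000) is an arbitrary illustrative choice.
import timeit
import numpy as np

values = list(range(1000000))
array = np.arange(1000000)

py_time = timeit.timeit(lambda: sum(values), number=10)
np_time = timeit.timeit(lambda: array.sum(), number=10)

print("pure Python sum:", py_time)
print("numpy array sum:", np_time)  # typically much faster
```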
array = np.array(numbers, float) print(array) array[1] array[:2] array[1] = 10.0 print(array)
[ 1. 10. 3. 4. 5.]
Unlicense
Numpy/Introduction to Numpy.ipynb
PhillipWongSeven/Machine-Learning-Simplified
NumPy arrays can be two-dimensional
array = np.array([[1,2,3], [4,5,6]], float) print(array) print("") print(array[1][1]) print("") print(array[1, :]) print("") print(array[:, 2]) print("") print(array[:, 1]) ray = np.array([[1,2,3], [4,5,6]], float) print(ray) print(ray[1][2]) print(ray[1][1]) print(ray[:, 2])
[[ 1. 2. 3.] [ 4. 5. 6.]] 6.0 5.0 [ 3. 6.]
Unlicense
Numpy/Introduction to Numpy.ipynb
PhillipWongSeven/Machine-Learning-Simplified
Array arithmetic
ray1 = np.array([1, 2, 3], float) ray2 = np.array([5, 2, 6], float) print (ray1+ray2) print (ray1*ray2) print (np.mean(ray1)) print(np.dot(ray1, ray2)) array_1 = np.array([1, 2, 3], float) array_2 = np.array([5, 2, 6], float) array_1 + array_2
_____no_output_____
Unlicense
Numpy/Introduction to Numpy.ipynb
PhillipWongSeven/Machine-Learning-Simplified
Array multiplication
array_1 * array_2
_____no_output_____
Unlicense
Numpy/Introduction to Numpy.ipynb
PhillipWongSeven/Machine-Learning-Simplified
Build a logistic regression model with MNIST
import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('data/',one_hot = True) trainimg = mnist.train.images trainLabel = mnist.train.labels testimg = mnist.test.images testLabel = mnist.test.labels print("MNIST Loaded") x = tf.placeholder('float', [None, 784]) y = tf.placeholder('float', [None, 10]) w = tf.Variable(tf.random_normal([784,10])) b = tf.Variable(tf.random_normal([10])) #Logistic Regression Model activ = tf.nn.softmax(tf.matmul(x,w)+b) #cost function cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(activ))) #optimizer optm = tf.train.GradientDescentOptimizer(0.01).minimize(cost) #prediction pred = tf.equal(tf.arg_max(activ,1), tf.arg_max(y, 1)) #accuracy accr = tf.reduce_mean(tf.cast(pred, "float")) init = tf.initialize_all_variables() training_epochs = 10 batch_size = 100 display_step = 2 # SESSION sess = tf.Session() sess.run(tf.initialize_all_variables()) # MINI-BATCH LEARNING for epoch in range(training_epochs): avg_cost = 0. num_batch = int(mnist.train.num_examples/batch_size) for i in range(num_batch): batch_xs, batch_ys = mnist.train.next_batch(batch_size) sess.run(optm, feed_dict={x: batch_xs, y: batch_ys}) feeds = {x: batch_xs, y: batch_ys} avg_cost += sess.run(cost, feed_dict=feeds)/num_batch # DISPLAY if epoch % display_step == 0: feeds_train = {x: batch_xs, y: batch_ys} feeds_test = {x: mnist.test.images, y: mnist.test.labels} train_acc = sess.run(accr, feed_dict=feeds_train) test_acc = sess.run(accr, feed_dict=feeds_test) print ("Epoch: %03d/%03d cost: %.9f train_acc: %.3f test_acc: %.3f" % (epoch, training_epochs, avg_cost, train_acc, test_acc)) print ("DONE")
Epoch: 000/010 cost: 75.560586708 train_acc: 0.910 test_acc: 0.865 Epoch: 002/010 cost: 32.222071790 train_acc: 0.910 test_acc: 0.895 Epoch: 004/010 cost: 27.124585262 train_acc: 0.930 test_acc: 0.903 Epoch: 006/010 cost: 24.715282281 train_acc: 0.940 test_acc: 0.911 Epoch: 008/010 cost: 23.033731372 train_acc: 0.910 test_acc: 0.914 DONE
MIT
week3 logistic Regression.ipynb
SongChiyoon/study-Tensorflow
Change sys.path to use my tensortrade instead of the one in env
import sys sys.path.append("/Users/jasonfiacco/Documents/Yale/Senior/thesis/deeptrader") print(sys.path)
['/usr/local/opt/python/Frameworks/Python.framework/Versions/3.6/lib/python36.zip', '/usr/local/opt/python/Frameworks/Python.framework/Versions/3.6/lib/python3.6', '/usr/local/opt/python/Frameworks/Python.framework/Versions/3.6/lib/python3.6/lib-dynload', '', '/Users/jasonfiacco/Documents/Yale/Senior/thesis/env2/lib/python3.6/site-packages', '/Users/jasonfiacco/Documents/Yale/Senior/thesis/env2/lib/python3.6/site-packages/IPython/extensions', '/Users/jasonfiacco/.ipython', '/Users/jasonfiacco/Documents/Yale/Senior/thesis/deeptrader']
Apache-2.0
training/predict_it_v2-Ray-attempt.ipynb
jasonfiacco/deeptrader
Read PredictIt Data Instead
import ssl import pandas as pd ssl._create_default_https_context = ssl._create_unverified_context # Only used if pandas gives a SSLError def fetch_data(symbol): path = "/Users/jasonfiacco/Documents/Yale/Senior/thesis/predictit_datasets/" filename = "{}.xlsx".format(symbol) df = pd.read_excel(path + filename, skiprows=4) df = df.set_index("Date") df = df.drop(df.columns[[7,8,9]], axis=1) df = df.drop("ID", 1) df.columns = [symbol + ":" + name.lower() for name in df.columns] return df all_data = pd.concat([ fetch_data("WARREN"), fetch_data("CRUZ"), fetch_data("MANCHIN"), fetch_data("SANDERS"), fetch_data("NELSON"), fetch_data("DONNELLY"), fetch_data("PELOSI"), fetch_data("MANAFORT"), fetch_data("BROWN"), fetch_data("RYAN"), fetch_data("STABENOW") ], axis=1) all_data.head()
_____no_output_____
Apache-2.0
training/predict_it_v2-Ray-attempt.ipynb
jasonfiacco/deeptrader
Plot the closing prices for all the markets
%matplotlib inline closing_prices = all_data.loc[:, [("close" in name) for name in all_data.columns]] closing_prices.plot()
_____no_output_____
Apache-2.0
training/predict_it_v2-Ray-attempt.ipynb
jasonfiacco/deeptrader
Slice just a specific time period from the dataframe
all_data.index = pd.to_datetime(all_data.index) subset_data = all_data[(all_data.index >= '09-01-2017') & (all_data.index <= '09-04-2019')] subset_data.head()
_____no_output_____
Apache-2.0
training/predict_it_v2-Ray-attempt.ipynb
jasonfiacco/deeptrader
Define Exchanges. An exchange needs a name, an execution service, and streams of price data in order to function properly. The setups supported right now use the simulated execution service with simulated or stochastic data. More execution services will be made available in the future, as well as price streams, so that live data and execution can be supported.
from tensortrade.exchanges import Exchange from tensortrade.exchanges.services.execution.simulated import execute_order from tensortrade.data import Stream #Exchange(name of exchange, service) #It looks like each Stream takes a name, and then a list of the closing prices. predictit_exch = Exchange("predictit", service=execute_order)( Stream("USD-WARREN", list(subset_data['WARREN:close'])), Stream("USD-CRUZ", list(subset_data['CRUZ:close'])), Stream("USD-MANCHIN", list(subset_data['MANCHIN:close'])), Stream("USD-SANDERS", list(subset_data['SANDERS:close'])), Stream("USD-NELSON", list(subset_data['NELSON:close'])), Stream("USD-DONNELLY", list(subset_data['DONNELLY:close'])), Stream("USD-PELOSI", list(subset_data['PELOSI:close'])), Stream("USD-MANAFORT", list(subset_data['MANAFORT:close'])), Stream("USD-BROWN", list(subset_data['BROWN:close'])), Stream("USD-RYAN", list(subset_data['RYAN:close'])), Stream("USD-STABENOW", list(subset_data['STABENOW:close'])) )
_____no_output_____
Apache-2.0
training/predict_it_v2-Ray-attempt.ipynb
jasonfiacco/deeptrader
Now that the exchanges have been defined, we can define the features we would like to include, excluding the prices we have already provided for the exchanges. Here we do it without adding any other features and just use price.
#You still have to add "Streams" for all the standard columns open, high, low, close, volume in this case from tensortrade.data import DataFeed, Module with Module("predictit") as predictit_ns: predictit_nodes = [Stream(name, list(subset_data[name])) for name in subset_data.columns] #Then create the Feed from it feed = DataFeed([predictit_ns]) feed.next()
_____no_output_____
Apache-2.0
training/predict_it_v2-Ray-attempt.ipynb
jasonfiacco/deeptrader
Portfolio. Make the portfolio using any combination of exchanges and instruments that the exchange supports.
#I am going to have to add "instruments" for all 25 of the PredictIt markets I'm working with. from tensortrade.instruments import USD, WARREN, CRUZ, MANCHIN, SANDERS, NELSON, DONNELLY,\ PELOSI, MANAFORT, BROWN, RYAN, STABENOW from tensortrade.wallets import Wallet, Portfolio portfolio = Portfolio(USD, [ Wallet(predictit_exch, 10000 * USD), Wallet(predictit_exch, 0 * WARREN), Wallet(predictit_exch, 0 * CRUZ), Wallet(predictit_exch, 0 * MANCHIN), Wallet(predictit_exch, 0 * SANDERS), Wallet(predictit_exch, 0 * NELSON), Wallet(predictit_exch, 0 * DONNELLY), Wallet(predictit_exch, 0 * PELOSI), Wallet(predictit_exch, 0 * MANAFORT), Wallet(predictit_exch, 0 * BROWN), Wallet(predictit_exch, 0 * RYAN), Wallet(predictit_exch, 0 * STABENOW) ])
_____no_output_____
Apache-2.0
training/predict_it_v2-Ray-attempt.ipynb
jasonfiacco/deeptrader
Environment
from tensortrade.environments import TradingEnvironment env = TradingEnvironment( feed=feed, portfolio=portfolio, action_scheme='simple', reward_scheme='simple', window_size=15, enable_logger=False, renderers = 'screenlog' ) env.feed.next()
_____no_output_____
Apache-2.0
training/predict_it_v2-Ray-attempt.ipynb
jasonfiacco/deeptrader
^An environment doesn't just show the OHLCV for each instrument; it also shows free, locked, and total balances, as well as "USD_BTC". Using 123's Ray example
import os parent_dir = "/Users/jasonfiacco/Documents/Yale/Senior/thesis/deeptrader" os.environ["PYTHONPATH"] = parent_dir + ":" + os.environ.get("PYTHONPATH", "") !PYTHONWARNINGS=ignore::yaml.YAMLLoadWarning #Import tensortrade import tensortrade # Define Exchanges from tensortrade.exchanges import Exchange from tensortrade.exchanges.services.execution.simulated import execute_order from tensortrade.data import Stream # Define External Data Feed (features) import ta from sklearn import preprocessing from tensortrade.data import DataFeed, Module # Portfolio from tensortrade.instruments import USD, BTC from tensortrade.wallets import Wallet, Portfolio from tensortrade.actions import ManagedRiskOrders from gym.spaces import Discrete # Environment from tensortrade.environments import TradingEnvironment import gym import ray from ray import tune from ray.tune import grid_search from ray.tune.registry import register_env import ray.rllib.agents.ppo as ppo import ray.rllib.agents.dqn as dqn from ray.tune.logger import pretty_print from tensortrade.rewards import RiskAdjustedReturns class RayTradingEnv(TradingEnvironment): def __init__(self): env = TradingEnvironment( feed=feed, portfolio=portfolio, action_scheme="simple", reward_scheme="simple", window_size=15, enable_logger=False, renderers = 'screenlog' ) self.env = env self.action_space = self.env.action_space self.observation_space = self.env.observation_space def reset(self): return self.env.reset() def step(self, action): return self.env.step(action) def env_creator(env_config): return RayTradingEnv() register_env("ray_trading_env", env_creator) ray.init(ignore_reinit_error=True) config = dqn.DEFAULT_CONFIG.copy() config["num_gpus"] = 0 #config["num_workers"] = 4 #config["num_envs_per_worker"] = 8 # config["eager"] = False # config["timesteps_per_iteration"] = 100 # config["train_batch_size"] = 20 #config['log_level'] = "DEBUG" trainer = dqn.DQNTrainer(config=config, env="ray_trading_env") config
2020-03-05 22:30:39,190 INFO resource_spec.py:212 -- Starting Ray with 5.71 GiB memory available for workers and up to 2.86 GiB for objects. You can adjust these settings with ray.init(memory=<bytes>, object_store_memory=<bytes>). 2020-03-05 22:30:39,555 INFO services.py:1078 -- View the Ray dashboard at localhost:8265 2020-03-05 22:30:39,868 INFO trainer.py:420 -- Tip: set 'eager': true or the --eager flag to enable TensorFlow eager execution 2020-03-05 22:30:39,952 INFO trainer.py:580 -- Current log_level is WARN. For more information, set 'log_level': 'INFO' / 'DEBUG' or use the -v and -vv flags. /Users/jasonfiacco/Documents/Yale/Senior/thesis/env2/lib/python3.6/site-packages/gym/logger.py:30: UserWarning: WARN: Box bound precision lowered by casting to float32 /Users/jasonfiacco/Documents/Yale/Senior/thesis/env2/lib/python3.6/site-packages/ray/rllib/utils/from_config.py:134: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details. 2020-03-05 22:30:42,573 WARNING util.py:37 -- Install gputil for GPU system monitoring.
Apache-2.0
training/predict_it_v2-Ray-attempt.ipynb
jasonfiacco/deeptrader
Train using the old-fashioned RLlib way
for i in range(10): # Perform one iteration of training the policy with DQN print("Training iteration {}...".format(i)) result = trainer.train() print("result: {}".format(result)) if i % 100 == 0: checkpoint = trainer.save() print("checkpoint saved at", checkpoint) result['hist_stats']['episode_reward']
_____no_output_____
Apache-2.0
training/predict_it_v2-Ray-attempt.ipynb
jasonfiacco/deeptrader
OR train using the tune way (better so far)
analysis = tune.run( "DQN", name = "DQN10-paralellism", checkpoint_at_end=True, stop={ "timesteps_total": 4000, }, config={ "env": "ray_trading_env", "lr": grid_search([1e-4]), # try different lrs "num_workers": 2, # parallelism, }, ) #Use the below command to see results #tensorboard --logdir=/Users/jasonfiacco/ray_results/DQN2 #Now you can plot the reward results of your tuner. dfs = analysis.trial_dataframes ax = None for d in dfs.values(): ax = d.episode_reward_mean.plot(ax=ax, legend=True)
_____no_output_____
Apache-2.0
training/predict_it_v2-Ray-attempt.ipynb
jasonfiacco/deeptrader
Restoring an already existing agent that I tuned
import os logdir = analysis.get_best_logdir("episode_reward_mean", mode="max") trainer.restore(os.path.join(logdir, "checkpoint_993/checkpoint-993")) trainer.restore("/Users/jasonfiacco/ray_results/DQN4/DQN_ray_trading_env_fedb24f0_0_lr=1e-06_2020-03-03_15-46-02kzbdv53d/checkpoint_5/checkpoint-5")
_____no_output_____
Apache-2.0
training/predict_it_v2-Ray-attempt.ipynb
jasonfiacco/deeptrader
Testing
#Set up a testing environment with test data. test_env = TradingEnvironment( feed=feed, portfolio=portfolio, action_scheme='simple', reward_scheme='simple', window_size=15, enable_logger=False, renderers = 'screenlog' ) for episode_num in range(1): state = test_env.reset() done = False cumulative_reward = 0 step = 0 action = trainer.compute_action(state) while not done: action = trainer.compute_action(state) state, reward, done, results = test_env.step(action) cumulative_reward += reward #Render every 100 steps: if step % 100 == 0: test_env.render() step += 1 print("Cumulative reward: ", cumulative_reward)
[2020-03-04 9:54:39 PM] Step: 1 [2020-03-04 9:54:41 PM] Step: 101 [2020-03-04 9:54:44 PM] Step: 201 [2020-03-04 9:54:47 PM] Step: 301 [2020-03-04 9:54:50 PM] Step: 401 [2020-03-04 9:54:52 PM] Step: 501 [2020-03-04 9:54:55 PM] Step: 601 [2020-03-04 9:54:58 PM] Step: 701 Cumulative reward: 4.969025307093819
Apache-2.0
training/predict_it_v2-Ray-attempt.ipynb
jasonfiacco/deeptrader
Plot
%matplotlib inline portfolio.performance.plot() portfolio.performance.net_worth.plot() #Plot the total balance in each type of item p = portfolio.performance p2 = p.iloc[:, :] weights = p2.loc[:, [("/worth" in name) for name in p2.columns]] weights.iloc[:, 1:8].plot()
_____no_output_____
Apache-2.0
training/predict_it_v2-Ray-attempt.ipynb
jasonfiacco/deeptrader
Try Plotly Render too
from tensortrade.environments.render import PlotlyTradingChart from tensortrade.environments.render import FileLogger chart_renderer = PlotlyTradingChart( height = 800 ) file_logger = FileLogger( filename='example.log', # omit or None for automatic file name path='training_logs' # create a new directory if doesn't exist, None for no directory ) price_history.columns = ['datetime', 'open', 'high', 'low', 'close', 'volume'] env = TradingEnvironment( feed=feed, portfolio=portfolio, action_scheme='managed-risk', reward_scheme='risk-adjusted', window_size=20, price_history=price_history, renderers = [chart_renderer, file_logger] ) from tensortrade.agents import DQNAgent agent = DQNAgent(env) agent.train(n_episodes=1, n_steps=1000, render_interval=1)
_____no_output_____
Apache-2.0
training/predict_it_v2-Ray-attempt.ipynb
jasonfiacco/deeptrader
Extra Stuff
apath = "/Users/jasonfiacco/Documents/Yale/Senior/thesis/jasonfiacco-selectedmarkets-mytickers.xlsx" df = pd.read_excel(apath, skiprows=2) jason_tickers = df.iloc[:, 5].tolist() descriptions = df.iloc[:, 1].tolist() for ticker, description in zip(jason_tickers, descriptions): l = "{} = Instrument(\'{}\', 2, \'{}\')".format(ticker, ticker, description) print(l)
_____no_output_____
Apache-2.0
training/predict_it_v2-Ray-attempt.ipynb
jasonfiacco/deeptrader
1. Convert pdf to image
## NOTE: install tesseract (https://github.com/UB-Mannheim/tesseract/wiki) and Poppler first # !pip install pytesseract # !pip install Pillow # !pip install pdf2image # import statements from PIL import Image from pdf2image import convert_from_path import sys import os import numpy as np folder_path = 'C:\\Users\Vanessa\\Downloads\\for_ocr' file_list = os.listdir(folder_path) # remove duplicates from list unique_files = [file for file in file_list if "(1)" not in file] # convert pdf to image in PNG format def pdf_to_imgs(folder_path, file): pages = convert_from_path(f"{folder_path}\\{file}", 500) # counter for image file img_counter = 1 # for each unique page, make a filename and save as png for page in pages: filename = f"{file}_{img_counter}.png".replace('.pdf','') print(f'Saving {filename}') page.save(filename, 'PNG') img_counter += 1 for file in unique_files: pdf_to_imgs(folder_path, file)
_____no_output_____
MIT
dict_ocr.ipynb
vanessapigwin/scrapingpractice
2. Check file integrity, size
folder_path = 'C:\\Users\\Vanessa\\Jupyter Notebooks\\STUFF' file_list = [f for f in os.listdir(folder_path) if f.endswith('.png')] print('Total files to check:', len(file_list)) # getting maximum dimension of each image max_width = 0 max_height = 0 for file in file_list: try: with Image.open(os.path.join(folder_path, file)) as img: width, height = img.size if width > max_width: max_width = width if height > max_height: max_height = height except: print(file) print('Maximum Width: ', max_width) print('Maximum Height: ', max_height)
_____no_output_____
MIT
dict_ocr.ipynb
vanessapigwin/scrapingpractice
3. Extract text from the images with OCR
import cv2 as cv import pytesseract pytesseract.pytesseract.tesseract_cmd=r'C:\Program Files\Tesseract-OCR\tesseract.exe' custom_config = r' --psm 6' # method to ocr def remove_header_bg(img): # convert image to hsv img_hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV) h, s, v = cv.split(img_hsv) # threshold saturation img thresh1 = cv.threshold(s, 92, 255, cv.THRESH_BINARY)[1] # threshold value img then invert thresh2 = cv.threshold(v, 128, 255, cv.THRESH_BINARY_INV)[1] # make mask mask = cv.add(thresh1, thresh2) # apply mask to remove unwanted background on figure processed_img = img.copy() processed_img[mask==0] = (255,255,255) lined_img = processed_img.copy() # convert to greyscale gray = cv.cvtColor(lined_img, cv.COLOR_BGR2GRAY) blur = cv.GaussianBlur(gray,(5,5),0) thresh = cv.threshold(blur, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)[1] # remove horizontal lines hor_kernel = cv.getStructuringElement(cv.MORPH_RECT, (100,1)) remove_hor = cv.morphologyEx(thresh, cv.MORPH_OPEN, hor_kernel, iterations=2) cnts = cv.findContours(remove_hor, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) cnts = cnts[0] if len(cnts) == 2 else cnts[1] for c in cnts: cv.drawContours(lined_img, [c], -1, (255,255,255), 5) # try to read text text = pytesseract.image_to_string(lined_img, config=custom_config) return text # get imgage files img_path = os.path.abspath('') imgs = [file for file in os.listdir(img_path) if file.endswith('.png')] imgs.sort() for img in imgs: fname = os.path.splitext(img)[0] image = cv.imread(img) title = remove_header_bg(image[1200:1700 , 100:5900]) header = remove_header_bg(image[1800:1950 , 100:5900]) contents = remove_header_bg(image[2100:7100 , 100:5900]) with open(f'{fname}.txt', 'a') as f: f.write(title) f.write(header) f.write(contents) print(fname,' converted') print('All img files converted')
HH-001_1 converted HH-002_1 converted HH-003_1 converted HH-004_1 converted HH-005_1 converted HH-006_1 converted HH-006_2 converted HH-007_1 converted HH-008_1 converted HH-010_1 converted HH-010_2 converted HH-011_1 converted HH-011_2 converted HH-012_1 converted HH-013_1 converted HH-013_2 converted HH-014_1 converted HH-014_2 converted HH-015_1 converted HH-015_2 converted HH-015_3 converted HH-015_4 converted IND-003_1 converted IND-003_2 converted IND-004_1 converted IND-004_2 converted IND-004_3 converted IND-005_1 converted IND-005_2 converted IND-005_3 converted IND-012_1 converted IND-015_1 converted IND-015_2 converted IND-015_3 converted IND-017_1 converted IND-017_2 converted IND-017_3 converted IND-047_1 converted IND_001_1 converted IND_002_1 converted IND_006_1 converted IND_007_1 converted IND_008_1 converted IND_009_1 converted IND_009_2 converted IND_010_1 converted IND_010_2 converted IND_010_3 converted IND_010_4 converted IND_010_5 converted IND_012_1 converted IND_013_1 converted IND_013_2 converted IND_014_1 converted IND_014_2 converted IND_014_3 converted IND_018_1 converted IND_019_1 converted IND_019_2 converted IND_020_1 converted IND_022_1 converted IND_022_2 converted IND_022_3 converted IND_022_4 converted IND_022_5 converted IND_023_1 converted IND_024_1 converted IND_024_2 converted IND_025_1 converted IND_025_2 converted IND_026_1 converted IND_027_1 converted IND_029_1 converted IND_031_1 converted IND_031_2 converted IND_032_1 converted IND_033_1 converted IND_033_2 converted IND_035_1 converted IND_035_2 converted IND_036_1 converted IND_037_1 converted IND_037_2 converted IND_038_1 converted IND_038_2 converted IND_039_1 converted IND_039_2 converted IND_039_3 converted IND_039_4 converted IND_039_5 converted IND_040_1 converted IND_040_2 converted IND_041_1 converted IND_041_2 converted IND_042_1 converted IND_042_2 converted IND_043_1 converted IND_043_2 converted IND_044_1 converted IND_044_2 converted IND_045_1 converted IND_047_1 converted IND_048_1 converted IND_049_1 converted IND_050_1 converted IND_051_1 converted IND_053_1 converted IND_055_1 converted All img files converted
MIT
dict_ocr.ipynb
vanessapigwin/scrapingpractice
[Table of Contents](./table_of_contents.ipynb) Smoothing
#format the book %matplotlib inline from __future__ import division, print_function from book_format import load_style load_style()
_____no_output_____
CC-BY-4.0
13-Smoothing.ipynb
yangzongsheng/kalman
Introduction The performance of the Kalman filter is not optimal when you consider future data. For example, suppose we are tracking an aircraft, and the latest measurement deviates far from the current track, like so (I'll only consider 1 dimension for simplicity):
import matplotlib.pyplot as plt data = [10.1, 10.2, 9.8, 10.1, 10.2, 10.3, 10.1, 9.9, 10.2, 10.0, 9.9, 11.4] plt.plot(data) plt.xlabel('time') plt.ylabel('position');
_____no_output_____
CC-BY-4.0
13-Smoothing.ipynb
yangzongsheng/kalman
After a period of near steady state, we have a very large change. Assume the change is past the limit of the aircraft's flight envelope. Nonetheless the Kalman filter incorporates that new measurement into the filter based on the current Kalman gain. It cannot reject the noise because the measurement could reflect the initiation of a turn. Granted it is unlikely that we are turning so abruptly, but it is impossible to say whether * The aircraft started a turn a while ago, but the previous measurements were noisy and didn't show the change. * The aircraft is turning, and this measurement is very noisy * The measurement is very noisy and the aircraft has not turned * The aircraft is turning in the opposite direction, and the measurement is extremely noisy. Now, suppose the following measurements are: 11.3 12.1 13.3 13.9 14.5 15.2
data2 = [11.3, 12.1, 13.3, 13.9, 14.5, 15.2] plt.plot(data + data2);
_____no_output_____
CC-BY-4.0
13-Smoothing.ipynb
yangzongsheng/kalman
Given these future measurements we can infer that yes, the aircraft initiated a turn. On the other hand, suppose these are the following measurements.
data3 = [9.8, 10.2, 9.9, 10.1, 10.0, 10.3, 9.9, 10.1] plt.plot(data + data3);
_____no_output_____
CC-BY-4.0
13-Smoothing.ipynb
yangzongsheng/kalman
In this case we are led to conclude that the aircraft did not turn and that the outlying measurement was merely very noisy. An Overview of How Smoothers Work: The Kalman filter is a *recursive* filter with the Markov property - its estimate at step `k` is based only on the estimate from step `k-1` and the measurement at step `k`. But this means that the estimate from step `k-1` is based on step `k-2`, and so on back to the first epoch. Hence, the estimate at step `k` depends on all of the previous measurements, though to varying degrees. `k-1` has the most influence, `k-2` has the next most, and so on. Smoothing filters incorporate future measurements into the estimate for step `k`. The measurement from `k+1` will have the most effect, `k+2` will have less effect, `k+3` less yet, and so on. This topic is called *smoothing*, but I think that is a misleading name. I could smooth the data above by passing it through a low pass filter. The result would be smooth, but not necessarily accurate because a low pass filter will remove real variations just as much as it removes noise. In contrast, Kalman smoothers are *optimal* - they incorporate all available information to make the best estimate that is mathematically achievable. Types of Smoothers: There are three classes of Kalman smoothers that produce better tracking in these situations. * Fixed-Interval Smoothing: This is a batch processing based filter. This filter waits for all of the data to be collected before making any estimates. For example, you may be a scientist collecting data for an experiment, and don't need to know the result until the experiment is complete. A fixed-interval smoother will collect all the data, then estimate the state at each measurement using all available previous and future measurements. If it is possible for you to run your Kalman filter in batch mode it is always recommended to use one of these filters as it will provide much better results than the recursive forms of the filter from the previous chapters. * Fixed-Lag Smoothing: Fixed-lag smoothers introduce latency into the output. Suppose we choose a lag of 4 steps. The filter will ingest the first 3 measurements but not output a filtered result. Then, when the 4th measurement comes in the filter will produce the output for measurement 1, taking measurements 1 through 4 into account. When the 5th measurement comes in, the filter will produce the result for measurement 2, taking measurements 2 through 5 into account. This is useful when you need recent data but can afford a bit of lag. For example, perhaps you are using machine vision to monitor a manufacturing process. If you can afford a few seconds delay in the estimate, a fixed-lag smoother will allow you to produce very accurate and smooth results (see the sketch after this list). * Fixed-Point Smoothing: A fixed-point filter operates as a normal Kalman filter, but also produces an estimate for the state at some fixed time $j$. Before the time $k$ reaches $j$ the filter operates as a normal filter. Once $k>j$ the filter estimates $x_k$ and then also updates its estimate for $x_j$ using all of the measurements between $j\dots k$. This can be useful to estimate initial parameters for a system, or for producing the best estimate for an event that happened at a specific time. For example, you may have a robot that took a photograph at time $j$. You can use a fixed-point smoother to get the best possible pose information for the camera at time $j$ as the robot continues moving.
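To make the fixed-lag idea concrete, here is a minimal sketch on a constant-velocity model, assuming FilterPy's `FixedLagSmoother` API (the `dim_x`/`dim_z`/`N` constructor, `smooth()`, and the `xSmooth` list); the lag length, noise values, and synthetic track are illustrative choices, so treat this as a sketch rather than a definitive recipe.

```python
# Sketch: fixed-lag smoothing with FilterPy (assumed API: FixedLagSmoother,
# .smooth(z), and the .xSmooth list of lag-refined estimates).
import numpy as np
from numpy.random import randn
from filterpy.kalman import FixedLagSmoother

fls = FixedLagSmoother(dim_x=2, dim_z=1, N=8)   # N = lag, in steps (illustrative)
fls.x = np.array([[0.], [0.5]])                 # position and velocity
fls.F = np.array([[1., 1.],
                  [0., 1.]])                    # constant-velocity transition
fls.H = np.array([[1., 0.]])                    # we measure position only
fls.P *= 200.
fls.R *= 5.
fls.Q *= 0.001

zs = [t + randn() * 4 for t in range(100)]      # noisy straight-line track
for z in zs:
    fls.smooth(z)

# fls.xSmooth[k] holds the estimate for step k, refined using the N later measurements.
smoothed_pos = np.array(fls.xSmooth)[:, 0]
print(smoothed_pos[:5])
```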
Choice of Filters: The choice of these filters depends on your needs and how much memory and processing time you can spare. Fixed-point smoothing requires storage of all measurements, and is very costly to compute because the output for every time step is recomputed for every measurement. On the other hand, the filter does produce a decent output for the current measurement, so this filter can be used for real-time applications. Fixed-lag smoothing only requires you to store a window of data, and processing requirements are modest because only that window is processed for each new measurement. The drawback is that the filter's output always lags the input, and the smoothing is not as pronounced as is possible with fixed-interval smoothing. Fixed-interval smoothing produces the most smoothed output at the cost of having to be batch processed. Most algorithms use some sort of forwards/backwards algorithm that is only twice as slow as a recursive Kalman filter. Fixed-Interval Smoothing: There are many fixed-interval smoothers available in the literature. I have chosen to implement the smoother invented by Rauch, Tung, and Striebel because of its ease of implementation and efficiency of computation. It is also the smoother I have seen used most often in real applications. This smoother is commonly known as an RTS smoother. Derivation of the RTS smoother runs to several pages of densely packed math. I'm not going to inflict it on you. Instead I will briefly present the algorithm, equations, and then move directly to implementation and demonstration of the smoother. The RTS smoother works by first running the Kalman filter in a batch mode, computing the filter output for each step. Given the filter output for each measurement along with the covariance matrix corresponding to each output, the RTS runs over the data backwards, incorporating its knowledge of the future into the past measurements. When it reaches the first measurement it is done, and the filtered output incorporates all of the information in a maximally optimal form. The equations for the RTS smoother are very straightforward and easy to implement. This derivation is for the linear Kalman filter. Similar derivations exist for the EKF and UKF. These steps are performed on the output of the batch processing, going backwards from the most recent in time back to the first estimate. Each iteration incorporates the knowledge of the future into the state estimate. Since the state estimate already incorporates all of the past measurements, the result will be that each estimate will contain knowledge of all measurements in the past and future. Here it is very important to distinguish between past, present, and future, so I have used subscripts to denote whether the data is from the future or not. Predict Step $$\begin{aligned}\mathbf{P} &= \mathbf{FP}_k\mathbf{F}^\mathsf{T} + \mathbf{Q}\end{aligned}$$ Update Step $$\begin{aligned}\mathbf{K}_k &= \mathbf{P}_k\mathbf{F}^\mathsf{T}\mathbf{P}^{-1} \\\mathbf{x}_k &= \mathbf{x}_k + \mathbf{K}_k(\mathbf{x}_{k+1} - \mathbf{Fx}_k) \\\mathbf{P}_k &= \mathbf{P}_k + \mathbf{K}_k(\mathbf{P}_{k+1} - \mathbf{P})\mathbf{K}_k^\mathsf{T}\end{aligned}$$ As always, the hardest part of the implementation is correctly accounting for the subscripts.
A basic implementation without error checking would be:

```python
from numpy import zeros, dot
from numpy.linalg import inv

def rts_smoother(Xs, Ps, F, Q):
    n, dim_x, _ = Xs.shape

    # smoother gain
    K = zeros((n, dim_x, dim_x))
    x, P, Pp = Xs.copy(), Ps.copy(), Ps.copy()

    for k in range(n-2, -1, -1):
        Pp[k] = dot(F, P[k]).dot(F.T) + Q  # predicted covariance
        K[k] = dot(P[k], F.T).dot(inv(Pp[k]))
        x[k] += dot(K[k], x[k+1] - dot(F, x[k]))
        P[k] += dot(K[k], P[k+1] - Pp[k]).dot(K[k].T)
    return (x, P, K, Pp)
```

This implementation mirrors the one provided in FilterPy. It assumes that the Kalman filter is being run externally in batch mode, and that the resulting states and covariances are passed in via the `Xs` and `Ps` variables. Here is an example.
import numpy as np
from numpy import random
from numpy.random import randn
import matplotlib.pyplot as plt
from filterpy.kalman import KalmanFilter
import kf_book.book_plots as bp

def plot_rts(noise, Q=0.001, show_velocity=False):
    random.seed(123)
    fk = KalmanFilter(dim_x=2, dim_z=1)

    fk.x = np.array([0., 1.])    # state (x and dx)

    fk.F = np.array([[1., 1.],
                     [0., 1.]])  # state transition matrix

    fk.H = np.array([[1., 0.]])  # Measurement function
    fk.P = 10.                   # covariance matrix
    fk.R = noise                 # measurement noise
    fk.Q = Q                     # process uncertainty

    # create noisy data
    zs = np.asarray([t + randn()*noise for t in range(40)])

    # filter data with Kalman filter, then run smoother on it
    mu, cov, _, _ = fk.batch_filter(zs)
    M, P, C, _ = fk.rts_smoother(mu, cov)

    # plot data
    if show_velocity:
        index = 1
        print('gu')
    else:
        index = 0
    if not show_velocity:
        bp.plot_measurements(zs, lw=1)
    plt.plot(M[:, index], c='b', label='RTS')
    plt.plot(mu[:, index], c='g', ls='--', label='KF output')
    if not show_velocity:
        N = len(zs)
        plt.plot([0, N], [0, N], 'k', lw=2, label='track')
    plt.legend(loc=4)
    plt.show()

plot_rts(7.)
_____no_output_____
CC-BY-4.0
13-Smoothing.ipynb
yangzongsheng/kalman
I've injected a lot of noise into the signal to allow you to visually distinguish the RTS output from the ideal output. In the graph above we can see that the Kalman filter, drawn as the green dotted line, is reasonably smooth compared to the input, but it still wanders from the ideal line when several measurements in a row are biased towards one side of the line. In contrast, the RTS output is both extremely smooth and very close to the ideal output. With a perhaps more reasonable amount of noise we can see that the RTS output nearly lies on the ideal output. The Kalman filter output, while much better, still varies by a far greater amount.
plot_rts(noise=1.)
_____no_output_____
CC-BY-4.0
13-Smoothing.ipynb
yangzongsheng/kalman
However, we must understand that this smoothing is predicated on the system model. We have told the filter that what we are tracking follows a constant velocity model with very low process error. When the filter *looks ahead* it sees that the future behavior closely matches a constant velocity so it is able to reject most of the noise in the signal. Suppose instead our system has a lot of process noise. For example, if we are tracking a light aircraft in gusty winds its velocity will change often, and the filter will be less able to distinguish between noise and erratic movement due to the wind. We can see this in the next graph.
plot_rts(noise=7., Q=.1)
_____no_output_____
CC-BY-4.0
13-Smoothing.ipynb
yangzongsheng/kalman
This underscores the fact that these filters are not *smoothing* the data in the colloquial sense of the term. The filter is making an optimal estimate based on previous measurements, future measurements, and what you tell it about the behavior of the system and the noise in the system and measurements. Let's wrap this up by looking at the velocity estimates of the Kalman filter vs the RTS smoother.
plot_rts(7.,show_velocity=True)
gu
CC-BY-4.0
13-Smoothing.ipynb
yangzongsheng/kalman
The improvement in the velocity, which is a hidden variable, is even more dramatic.
Fixed-Lag Smoothing
The RTS smoother presented above should always be your choice of algorithm if you can run in batch mode because it incorporates all available data into each estimate. Not all problems allow you to do that, but you may still be interested in receiving smoothed values for previous estimates. The number line below illustrates this concept.
from kf_book.book_plots import figsize from kf_book.smoothing_internal import * with figsize(y=2): show_fixed_lag_numberline()
_____no_output_____
CC-BY-4.0
13-Smoothing.ipynb
yangzongsheng/kalman
At step $k$ we can estimate $x_k$ using the normal Kalman filter equations. However, we can make a better estimate for $x_{k-1}$ by using the measurement received for $x_k$. Likewise, we can make a better estimate for $x_{k-2}$ by using the measurements received for $x_{k-1}$ and $x_{k}$. We can extend this computation back for an arbitrary $N$ steps. Derivation of this math is beyond the scope of this book; Dan Simon's *Optimal State Estimation* [2] has a very good exposition if you are interested. The essence of the idea is that instead of having a state vector $\mathbf{x}$ we make an augmented state containing
$$\mathbf{x} = \begin{bmatrix}\mathbf{x}_k \\ \mathbf{x}_{k-1} \\ \vdots\\ \mathbf{x}_{k-N+1}\end{bmatrix}$$
This yields a very large covariance matrix that contains the covariance between states at different steps. FilterPy's class `FixedLagSmoother` takes care of all of this computation for you, including creation of the augmented matrices. All you need to do is compose it as if you are using the `KalmanFilter` class and then call `smooth()`, which implements the predict and update steps of the algorithm. Each call of `smooth` computes the estimate for the current measurement, but it also goes back and adjusts the previous `N-1` points as well. The smoothed values are contained in the list `FixedLagSmoother.xSmooth`. If you use `FixedLagSmoother.x` you will get the most recent estimate, but it is not smoothed and is no different from a standard Kalman filter output.
from filterpy.kalman import FixedLagSmoother, KalmanFilter import numpy.random as random fls = FixedLagSmoother(dim_x=2, dim_z=1, N=8) fls.x = np.array([0., .5]) fls.F = np.array([[1.,1.], [0.,1.]]) fls.H = np.array([[1.,0.]]) fls.P *= 200 fls.R *= 5. fls.Q *= 0.001 kf = KalmanFilter(dim_x=2, dim_z=1) kf.x = np.array([0., .5]) kf.F = np.array([[1.,1.], [0.,1.]]) kf.H = np.array([[1.,0.]]) kf.P *= 200 kf.R *= 5. kf.Q *= 0.001 N = 4 # size of lag nom = np.array([t/2. for t in range (0, 40)]) zs = np.array([t + random.randn()*5.1 for t in nom]) for z in zs: fls.smooth(z) kf_x, _, _, _ = kf.batch_filter(zs) x_smooth = np.array(fls.xSmooth)[:, 0] fls_res = abs(x_smooth - nom) kf_res = abs(kf_x[:, 0] - nom) plt.plot(zs,'o', alpha=0.5, marker='o', label='zs') plt.plot(x_smooth, label='FLS') plt.plot(kf_x[:, 0], label='KF', ls='--') plt.legend(loc=4) print('standard deviation fixed-lag: {:.3f}'.format(np.mean(fls_res))) print('standard deviation kalman: {:.3f}'.format(np.mean(kf_res)))
standard deviation fixed-lag: 2.616 standard deviation kalman: 3.562
CC-BY-4.0
13-Smoothing.ipynb
yangzongsheng/kalman
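For reference (an addition, not part of the original chapter), the same noisy measurements can also be run through the batch Kalman filter plus RTS smoother from earlier in the chapter, so you can compare its mean residual against the fixed-lag and plain Kalman numbers above. This sketch assumes the `zs`, `nom`, `np` and `KalmanFilter` objects defined in the cells above; the filter is re-initialised so `batch_filter` starts from the same prior as before.

```python
# Fresh filter with the same parameters as `kf` above, so batch_filter
# starts from the same initial state rather than the already-updated one.
kf2 = KalmanFilter(dim_x=2, dim_z=1)
kf2.x = np.array([0., .5])
kf2.F = np.array([[1., 1.],
                  [0., 1.]])
kf2.H = np.array([[1., 0.]])
kf2.P *= 200
kf2.R *= 5.
kf2.Q *= 0.001

# Batch-filter the measurements, then apply the fixed-interval (RTS) smoother
mu, cov, _, _ = kf2.batch_filter(zs)
M, P, _, _ = kf2.rts_smoother(mu, cov)

rts_res = abs(M[:, 0] - nom)
print('mean residual RTS smoother: {:.3f}'.format(np.mean(rts_res)))
```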
Preparation
# Uncomment to pin specific package versions
#!pip install torch==1.7.0
#!pip install torchvision==0.8.1

import torch
import torchvision

# Check the versions
print(torch.__version__)
print(torchvision.__version__)

# Mount Google Drive
from google.colab import drive
drive.mount('/content/gdrive')

%cd '/content/gdrive/MyDrive/Colab Notebooks/gan_sample/chapter2'

import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optimizers
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torchvision
import torchvision.transforms as transforms
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
_____no_output_____
MIT
chapter2/section2_1-AE.ipynb
tms-byte/gan_sample
Creating the dataset
np.random.seed(1234)
torch.manual_seed(1234)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Fetch the data
root = os.path.join('data', 'mnist')
transform = transforms.Compose([transforms.ToTensor(),
                                lambda x: x.view(-1)])
mnist_train = \
    torchvision.datasets.MNIST(root=root,
                               download=True,
                               train=True,
                               transform=transform)
mnist_test = \
    torchvision.datasets.MNIST(root=root,
                               download=True,
                               train=False,
                               transform=transform)

train_dataloader = DataLoader(mnist_train,
                              batch_size=100,
                              shuffle=True)
test_dataloader = DataLoader(mnist_test,
                             batch_size=1,
                             shuffle=False)
Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz to data/mnist/MNIST/raw/train-images-idx3-ubyte.gz
MIT
chapter2/section2_1-AE.ipynb
tms-byte/gan_sample
Defining the network
class Autoencoder(nn.Module):
    def __init__(self, device='cpu'):
        super().__init__()
        self.device = device
        self.l1 = nn.Linear(784, 200)
        self.l2 = nn.Linear(200, 784)

    def forward(self, x):
        # Encoder
        h = self.l1(x)
        # Activation function
        h = torch.relu(h)
        # Decoder
        h = self.l2(h)
        # Sigmoid squashes the output into the 0-1 range
        y = torch.sigmoid(h)
        return y
_____no_output_____
MIT
chapter2/section2_1-AE.ipynb
tms-byte/gan_sample
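Before training, it can help to sanity-check the architecture. This is an addition to the original notebook; it only assumes the `Autoencoder` class defined above.

```python
# Instantiate on CPU just to inspect the layer shapes and the parameter count
model_check = Autoencoder()
print(model_check)
n_params = sum(p.numel() for p in model_check.parameters())
print(f'Trainable parameters: {n_params}')
```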
Running the training
# Set up the model
model = Autoencoder(device=device).to(device)

# Loss function
criterion = nn.BCELoss()

# Optimizer
optimizer = optimizers.Adam(model.parameters())

epochs = 10

# Loop over epochs
for epoch in range(epochs):
    train_loss = 0.

    # Loop over mini-batches
    for (x, _) in train_dataloader:
        x = x.to(device)

        # Switch to training mode
        model.train()

        # Forward pass
        preds = model(x)

        # Error between the input image x and the reconstruction preds
        loss = criterion(preds, x)

        # Reset the gradients
        optimizer.zero_grad()

        # Backpropagate the error
        loss.backward()

        # Update the parameters
        optimizer.step()

        # Accumulate the training loss
        train_loss += loss.item()

    train_loss /= len(train_dataloader)

    print('Epoch: {}, Loss: {:.3f}'.format(
        epoch+1,
        train_loss
    ))
Epoch: 1, Loss: 0.153 Epoch: 2, Loss: 0.085 Epoch: 3, Loss: 0.075 Epoch: 4, Loss: 0.071 Epoch: 5, Loss: 0.069 Epoch: 6, Loss: 0.068 Epoch: 7, Loss: 0.067 Epoch: 8, Loss: 0.067 Epoch: 9, Loss: 0.066 Epoch: 10, Loss: 0.066
MIT
chapter2/section2_1-AE.ipynb
tms-byte/gan_sample
Reconstructing images
# Pull one sample from the dataloader
x, _ = next(iter(test_dataloader))
x = x.to(device)

# Switch to evaluation mode
model.eval()

# Reconstructed image
x_rec = model(x)

# Display the input image and the reconstructed image
for i, image in enumerate([x, x_rec]):
    image = image.view(28, 28).detach().cpu().numpy()
    plt.subplot(1, 2, i+1)
    plt.imshow(image, cmap='binary_r')
    plt.axis('off')
plt.show()
_____no_output_____
MIT
chapter2/section2_1-AE.ipynb
tms-byte/gan_sample
Optimization and gradient descent method
from IPython.display import IFrame IFrame(src="https://cdnapisec.kaltura.com/p/2356971/sp/235697100/embedIframeJs/uiconf_id/41416911/partner_id/2356971?iframeembed=true&playerId=kaltura_player&entry_id=1_wota11ay&flashvars[streamerType]=auto&amp;flashvars[localizationCode]=en&amp;flashvars[leadWithHTML5]=true&amp;flashvars[sideBarContainer.plugin]=true&amp;flashvars[sideBarContainer.position]=left&amp;flashvars[sideBarContainer.clickToClose]=true&amp;flashvars[chapters.plugin]=true&amp;flashvars[chapters.layout]=vertical&amp;flashvars[chapters.thumbnailRotator]=false&amp;flashvars[streamSelector.plugin]=true&amp;flashvars[EmbedPlayer.SpinnerTarget]=videoHolder&amp;flashvars[dualScreen.plugin]=true&amp;flashvars[hotspots.plugin]=1&amp;flashvars[Kaltura.addCrossoriginToIframe]=true&amp;&wid=1_o38cisoq",width='800', height='500')
_____no_output_____
MIT
docs/_sources/Module1/m1_06.ipynb
liuzhengqi1996/math452_Spring2022
One-step error probability
Write a computer program implementing asynchronous deterministic updates for a Hopfield network. Use Hebb's rule with $w_{ii}=0$. Generate and store p=[12,24,48,70,100,120] random patterns with N=120 bits. Each bit is either +1 or -1 with probability $\tfrac{1}{2}$. For each value of p estimate the one-step error probability $P_{\text {error}}^{t=1}$ based on $10^5$ independent trials. Here, one trial means that you generate and store a set of p random patterns, feed one of them, and perform one asynchronous update of a single randomly chosen neuron. If in some trials you encounter sgn(0), simply set sgn(0)=1. List below the values of $P_{\text {error}}^{t=1}$ that you obtained in the following form: [$p_1,p_2,\ldots,p_{6}$], where $p_n$ is the value of $P_{\text {error}}^{t=1}$ for the n-th value of p from the list above. Give four decimal places for each $p_n$.
import numpy as np
import time

def calculate_instance(n, p, zero_diagonal):
    # Create p random patterns
    patterns = []
    for i in range(p):
        patterns.append(np.random.choice([-1, 1], n))

    # Create the weight matrix according to Hebb's rule
    weights = patterns[0][:, None] * patterns[0]
    for el in patterns[1:]:
        weights = weights + el[:, None] * el
    weights = np.true_divide(weights, n)

    # Fill the diagonal with zeroes
    if zero_diagonal:
        np.fill_diagonal(weights, 0)

    # Feed the first stored pattern as input and test whether one
    # asynchronous update of a randomly chosen neuron flips its bit
    S1 = patterns[0]
    chosen_i = np.random.choice(range(n))
    S_i_old = S1[chosen_i]
    S_i = esign(np.dot(weights[chosen_i], S1))
    return S_i_old == S_i

def esign(x):
    # sign function with the convention sgn(0) = 1
    if x == 0:
        return 1
    else:
        return np.sign(x)
_____no_output_____
MIT
notebooks/1-One-step-error-probability.ipynb
EdinCitaku/ANN-notebooks
List your numerically computed $P_{\text {error}}^{t=1}$ for the parameters given above.
p = [12, 24, 48, 70, 100, 120] N = 120 I = 100000 for p_i in p: solve = [0,0] for i in range(I): ret = calculate_instance(N, p_i, True) if ret: solve[0]+=1 else: solve[1]+=1 p_error = float(solve[1]/I) print(f"Number of patterns: {p_i}, P_error(t=1): {p_error} ")
Number of patterns: 12, P_error(t=1): 0.00057 Number of patterns: 24, P_error(t=1): 0.01143 Number of patterns: 48, P_error(t=1): 0.05569 Number of patterns: 70, P_error(t=1): 0.09447 Number of patterns: 100, P_error(t=1): 0.13699 Number of patterns: 120, P_error(t=1): 0.15952
MIT
notebooks/1-One-step-error-probability.ipynb
EdinCitaku/ANN-notebooks
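As a cross-check on the numbers above (an addition, not part of the exercise): the standard Gaussian cross-talk approximation for Hebbian storage with $w_{ii}=0$ gives $P_{\text{error}}^{t=1} \approx \tfrac{1}{2}\left[1-\operatorname{erf}\left(\sqrt{N/(2p)}\right)\right]$, which can be evaluated directly and compared with the simulated values.

```python
import numpy as np
from scipy.special import erf

# Gaussian cross-talk estimate of the one-step error probability (w_ii = 0)
N = 120
for p_i in [12, 24, 48, 70, 100, 120]:
    p_theory = 0.5 * (1 - erf(np.sqrt(N / (2 * p_i))))
    print(f"Number of patterns: {p_i}, theoretical P_error(t=1): {p_theory:.4f}")
```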
Repeat the task, but now apply Hebb's rule without setting the diagonal weights to zero. For each value of p listed above, estimate the one-step error probability $P_{\text {error}}^{t=1}$ based on $10^5$ independent trials.
p = [12, 24, 48, 70, 100, 120] N = 120 I = 100000 for p_i in p: solve = [0,0] for i in range(I): ret = calculate_instance(N, p_i, False) if ret: solve[0]+=1 else: solve[1]+=1 p_error = float(solve[1]/I) print(f"Number of patterns: {p_i}, P_error(t=1): {p_error} ")
Number of patterns: 12, P_error(t=1): 0.00021 Number of patterns: 24, P_error(t=1): 0.0029 Number of patterns: 48, P_error(t=1): 0.0127 Number of patterns: 70, P_error(t=1): 0.01841 Number of patterns: 100, P_error(t=1): 0.02115 Number of patterns: 120, P_error(t=1): 0.02116
MIT
notebooks/1-One-step-error-probability.ipynb
EdinCitaku/ANN-notebooks
Acquiring Data from open repositories
A crucial step in the work of a computational biologist is not only analysing data, but also acquiring datasets to analyse, as well as toy datasets to test out computational methods and algorithms. The internet is full of such open datasets. Sometimes you have to sign up and create a user account to get access, especially for medical data. This can be time consuming, so here we will deal with easy-access resources, mostly of modest size. Multiple Python libraries provide a `datasets` module which makes fetching online data extremely seamless, with little need for preprocessing.
Goal of the notebook
Here you will get familiar with some ways to fetch datasets online. We do some data exploration on the data just for illustration, but the methods will be covered later.
Useful resources and links
When playing around with algorithms, it can be practical to use relatively small datasets. A good example is the `datasets` submodule of `scikit-learn`. `Nilearn` (a library for neuroimaging) also provides a collection of neuroimaging datasets. Many datasets can also be acquired through the competition website [Kaggle](https://www.kaggle.com), which describes how to access the data.
Links
- [OpenML](https://www.openml.org/search?type=data)
- [Nilearn datasets](https://nilearn.github.io/modules/reference.html#module-nilearn.datasets)
- [Sklearn datasets](https://scikit-learn.org/stable/modules/classes.html?highlight=datasets#module-sklearn.datasets)
- [Kaggle](https://www.kaggle.com/datasets)
- [MEDNIST]
- [**Awesomedata**](https://github.com/awesomedata/awesome-public-datasets) - We strongly recommend checking out the Awesomedata lists of public datasets, covering topics such as [biology/medicine](https://github.com/awesomedata/awesome-public-datasets#biology) and [neuroscience](https://github.com/awesomedata/awesome-public-datasets#neuroscience)
- [Papers with code](https://paperswithcode.com)
- [SNAP](https://snap.stanford.edu/data/) - Stanford Large Network Dataset Collection
- [Open Graph Benchmark (OGB)](https://github.com/snap-stanford/ogb) - Network datasets
- [Open Neuro](https://openneuro.org/)
- [Open fMRI](https://openfmri.org/dataset/)
# import basic libraries import numpy as np import pandas as pd from matplotlib import pyplot as plt
_____no_output_____
MIT
C_Data_resources/2_Open_datasets.ipynb
oercompbiomed/CBM101
We start with scikit-learn's datasets for testing out ML algorithms. Visit [here](https://scikit-learn.org/stable/modules/classes.html?highlight=datasets#module-sklearn.datasets) for an overview of the datasets.
from sklearn.datasets import fetch_olivetti_faces, fetch_20newsgroups, load_breast_cancer, load_diabetes, load_digits, load_iris
C:\Users\Peder\Anaconda3\envs\cbm101\lib\importlib\_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject return f(*args, **kwds)
MIT
C_Data_resources/2_Open_datasets.ipynb
oercompbiomed/CBM101
Load scikit-learn's digits dataset (8x8 images of handwritten digits, a small MNIST-like set)
X,y = load_digits(return_X_y=True) y.shape X.shape #1797 images, 64 pixels per image
_____no_output_____
MIT
C_Data_resources/2_Open_datasets.ipynb
oercompbiomed/CBM101
Exercise 1. Make a function `plot` taking an argument (k) to visualize the k'th sample. It is currently flattened; you will need to reshape it. Use `plt.imshow` for plotting.
# %load solutions/ex2_1.py def plot(k): plt.imshow(X[k].reshape(8,8), cmap='gray') plt.title(f"Number = {y[k]}") plt.show() plot(15); plot(450) faces = fetch_olivetti_faces()
_____no_output_____
MIT
C_Data_resources/2_Open_datasets.ipynb
oercompbiomed/CBM101
Exercise 2. Inspect the dataset. How many classes are there? How many samples per class? Also, plot some examples. What do the classes represent?
# %load solutions/ex2_2.py # example solution. # You are not expected to make a nice plotting function, # you can simply call plt.imshow a number of times and observe print(faces.DESCR) # this shows there are 40 classes, 10 samples per class print(faces.target) #the targets i.e. classes print(np.unique(faces.target).shape) # another way to see n_classes X = faces.images y = faces.target fig = plt.figure(figsize=(16,5)) idxs = [0,1,2, 11,12,13, 40,41] for i,k in enumerate(idxs): ax=fig.add_subplot(2,4,i+1) ax.imshow(X[k]) ax.set_title(f"target={y[k]}") # looking at a few plots shows that each target is a single person.
.. _olivetti_faces_dataset: The Olivetti faces dataset -------------------------- `This dataset contains a set of face images`_ taken between April 1992 and April 1994 at AT&T Laboratories Cambridge. The :func:`sklearn.datasets.fetch_olivetti_faces` function is the data fetching / caching function that downloads the data archive from AT&T. .. _This dataset contains a set of face images: http://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html As described on the original website: There are ten different images of each of 40 distinct subjects. For some subjects, the images were taken at different times, varying the lighting, facial expressions (open / closed eyes, smiling / not smiling) and facial details (glasses / no glasses). All the images were taken against a dark homogeneous background with the subjects in an upright, frontal position (with tolerance for some side movement). **Data Set Characteristics:** ================= ===================== Classes 40 Samples total 400 Dimensionality 4096 Features real, between 0 and 1 ================= ===================== The image is quantized to 256 grey levels and stored as unsigned 8-bit integers; the loader will convert these to floating point values on the interval [0, 1], which are easier to work with for many algorithms. The "target" for this database is an integer from 0 to 39 indicating the identity of the person pictured; however, with only 10 examples per class, this relatively small dataset is more interesting from an unsupervised or semi-supervised perspective. The original dataset consisted of 92 x 112, while the version available here consists of 64x64 images. When using these images, please give credit to AT&T Laboratories Cambridge. [ 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5 5 6 6 6 6 6 6 6 6 6 6 7 7 7 7 7 7 7 7 7 7 8 8 8 8 8 8 8 8 8 8 9 9 9 9 9 9 9 9 9 9 10 10 10 10 10 10 10 10 10 10 11 11 11 11 11 11 11 11 11 11 12 12 12 12 12 12 12 12 12 12 13 13 13 13 13 13 13 13 13 13 14 14 14 14 14 14 14 14 14 14 15 15 15 15 15 15 15 15 15 15 16 16 16 16 16 16 16 16 16 16 17 17 17 17 17 17 17 17 17 17 18 18 18 18 18 18 18 18 18 18 19 19 19 19 19 19 19 19 19 19 20 20 20 20 20 20 20 20 20 20 21 21 21 21 21 21 21 21 21 21 22 22 22 22 22 22 22 22 22 22 23 23 23 23 23 23 23 23 23 23 24 24 24 24 24 24 24 24 24 24 25 25 25 25 25 25 25 25 25 25 26 26 26 26 26 26 26 26 26 26 27 27 27 27 27 27 27 27 27 27 28 28 28 28 28 28 28 28 28 28 29 29 29 29 29 29 29 29 29 29 30 30 30 30 30 30 30 30 30 30 31 31 31 31 31 31 31 31 31 31 32 32 32 32 32 32 32 32 32 32 33 33 33 33 33 33 33 33 33 33 34 34 34 34 34 34 34 34 34 34 35 35 35 35 35 35 35 35 35 35 36 36 36 36 36 36 36 36 36 36 37 37 37 37 37 37 37 37 37 37 38 38 38 38 38 38 38 38 38 38 39 39 39 39 39 39 39 39 39 39] (40,)
MIT
C_Data_resources/2_Open_datasets.ipynb
oercompbiomed/CBM101
Once you have made yourself familiar with the dataset you can do some data exploration with unsupervised methods, like below. The next few lines of code are simply for illustration, don't worry about the code (we will cover unsupervised methods in submodule F).
from sklearn.decomposition import randomized_svd X = faces.data n_dim = 3 u, s, v = randomized_svd(X, n_dim)
_____no_output_____
MIT
C_Data_resources/2_Open_datasets.ipynb
oercompbiomed/CBM101
Now we have factorized the images into their constituent parts. The code below displays the various components isolated one by one.
def show_ims(ims): fig = plt.figure(figsize=(16,10)) idxs = [0,1,2, 11,12,13, 40,41,42, 101,101,103] for i,k in enumerate(idxs): ax=fig.add_subplot(3,4,i+1) ax.imshow(ims[k]) ax.set_title(f"target={y[k]}") for i in range(n_dim): my_s = np.zeros(s.shape[0]) my_s[i] = s[i] recon = u@np.diag(my_s)@v recon = recon.reshape(400,64,64) show_ims(recon)
_____no_output_____
MIT
C_Data_resources/2_Open_datasets.ipynb
oercompbiomed/CBM101
Are you able to see what the components represent? It at least looks like the second component captures the lighting (the light direction), and the third highlights the eyebrows and the shape of the chin.
from sklearn.manifold import TSNE tsne = TSNE(init='pca', random_state=0) trans = tsne.fit_transform(X) m = 8*10 # choose 4 people plt.figure(figsize=(16,10)) xs, ys = trans[:m,0], trans[:m,1] plt.scatter(xs, ys, c=y[:m], cmap='rainbow') for i,v in enumerate(zip(xs,ys, y[:m])): xx,yy,s = v #plt.text(xx,yy,s) #class plt.text(xx,yy,i) #index
_____no_output_____
MIT
C_Data_resources/2_Open_datasets.ipynb
oercompbiomed/CBM101
Many people seem to have multiple subclusters. What is the difference between those clusters? (e.g. 68,62,65 versus the other 60's)
ims = faces.images idxs = [68,62,65,66,60,64,63] #idxs = [9,4,1, 5,3] for k in idxs: plt.imshow(ims[k], cmap='gray') plt.show() def show(im): return plt.imshow(im, cmap='gray') import pandas as pd df= pd.read_csv('data/archive/covid_impact_on_airport_traffic.csv') df.shape df.describe() df.head() df.Country.unique() df.ISO_3166_2.unique() df.AggregationMethod.unique()
_____no_output_____
MIT
C_Data_resources/2_Open_datasets.ipynb
oercompbiomed/CBM101
Here we will look at [OpenML](https://www.openml.org/) - a repository of open datasets, free to explore and to test methods on.
Fetching an OpenML dataset
We need to pass in an ID to access a dataset, as follows:
from sklearn.datasets import fetch_openml
_____no_output_____
MIT
C_Data_resources/2_Open_datasets.ipynb
oercompbiomed/CBM101
OpenML contains all sorts of datatypes. By browsing the website we found an electroencephalography (EEG) dataset to explore:
data_id = 1471 #this was found by browsing OpenML dataset = fetch_openml(data_id=data_id, as_frame=True) dir(dataset) dataset.url type(dataset) print(dataset.DESCR) original_names = ['AF3', 'F7', 'F3', 'FC5', 'T7', 'P', 'O1', 'O2', 'P8', 'T8', 'FC6', 'F4', 'F8', 'AF4'] dataset.feature_names df = dataset.frame df.head() df.shape[0] / 117 # 128 frames per second df = dataset.frame y = df.Class #df.drop(columns='Class', inplace=True) df.dtypes #def summary(s): # print(s.max(), s.min(), s.mean(), s.std()) # print() # #for col in df.columns[:-1]: # column = df.loc[:,col] # summary(column) df.plot()
_____no_output_____
MIT
C_Data_resources/2_Open_datasets.ipynb
oercompbiomed/CBM101
From the plot we can quickly identify a bunch of huge outliers, making the plot look completely useless. We assume these are artifacts, and remove them.
df2 = df.iloc[:,:-1].clip_upper(6000) df2.plot()
C:\Users\Peder\Anaconda3\envs\cbm101\lib\site-packages\ipykernel_launcher.py:1: FutureWarning: clip_upper(threshold) is deprecated, use clip(upper=threshold) instead """Entry point for launching an IPython kernel.
MIT
C_Data_resources/2_Open_datasets.ipynb
oercompbiomed/CBM101
Now we see better what is going on. Let's just remove the frames corresponding to those outliers.
frames = np.nonzero(np.any(df.iloc[:,:-1].values>5000, axis=1))[0] frames df.drop(index=frames, inplace=True) df.plot(figsize=(16,8)) plt.legend(labels=original_names) df.columns
_____no_output_____
MIT
C_Data_resources/2_Open_datasets.ipynb
oercompbiomed/CBM101
Do some modelling of the data
from sklearn.linear_model import LogisticRegression

# note: despite the variable name, penalty='l2' is ridge-style regularization, not lasso
lasso = LogisticRegression(penalty='l2')

X = df.values[:, :-1]
y = df.Class
y = y.astype(int) - 1  # map to 0, 1

print(X.shape)
print(y.shape)

lasso.fit(X, y)
comp = (lasso.predict(X) == y).values
np.sum(comp.astype(int)) / y.shape[0]  # training accuracy is mediocre

lasso.coef_[0].shape
names = dataset.feature_names
original_names

coef = lasso.coef_[0]
plt.barh(range(coef.shape[0]), coef)
plt.yticks(ticks=range(14), labels=original_names)
plt.show()
_____no_output_____
MIT
C_Data_resources/2_Open_datasets.ipynb
oercompbiomed/CBM101
Interpreting the coefficients: we naturally tend to read the magnitude of the coefficients as feature importance. That is a fair interpretation, but we did not scale our features to a comparable range prior to fitting the model, so we cannot draw that conclusion here (see the scaling sketch further below).
Extra exercise. Go to [OpenML](https://openml.org) and use the search function (or just look around) to find any dataset that interests you. Load it using the above methodology, and try to do anything you can to understand the datatype, visualize it, etc.
### YOUR CODE HERE
_____no_output_____
MIT
C_Data_resources/2_Open_datasets.ipynb
oercompbiomed/CBM101
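Picking up the earlier caveat about unscaled features (and separate from the OpenML exercise above): a minimal sketch of standardising the EEG channels before refitting, so that coefficient magnitudes become comparable. It assumes the `X`, `y`, `original_names` and `plt` objects defined in the cells above.

```python
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression

# Standardise each channel to zero mean and unit variance
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

logreg_scaled = LogisticRegression(penalty='l2')
logreg_scaled.fit(X_scaled, y)

# With comparable feature scales, coefficient magnitudes are a fairer importance proxy
coef = logreg_scaled.coef_[0]
plt.barh(range(coef.shape[0]), coef)
plt.yticks(ticks=range(14), labels=original_names)
plt.title('Logistic regression coefficients after scaling')
plt.show()
```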
View Campaign and Interactions
In the first notebook `Personalize_BuildCampaign.ipynb` you successfully built and deployed a recommendation model using deep learning with Amazon Personalize. This notebook will expand on that and will walk you through adding the ability to react to the real-time behavior of users. If their intent changes while browsing movies, you will see revised recommendations based on that behavior. It will also showcase demo code for simulating a user selecting movies before the recommendations are returned. Below we start by importing the libraries that we need to interact with Personalize.
# Imports import boto3 import json import numpy as np import pandas as pd import time import uuid
_____no_output_____
MIT-0
getting_started/2.View_Campaign_And_Interactions.ipynb
lmorri/personalize-movielens-20m
Below you will paste in the campaign ARN that you used in your previous notebook. Also pick a random user ID from 50 - 300. Lastly you will also need to find your Dataset Group ARN from the previous notebook.
# Setup and Config # Recommendations from Event data personalize = boto3.client('personalize') personalize_runtime = boto3.client('personalize-runtime') HRNN_Campaign_ARN = "arn:aws:personalize:us-east-1:930444659029:campaign/DEMO-campaign" # Define User USER_ID = "676" # Dataset Group Arn: datasetGroupArn = "arn:aws:personalize:us-east-1:930444659029:dataset-group/DEMO-dataset-group" # Establish a connection to Personalize's Event Streaming personalize_events = boto3.client(service_name='personalize-events')
_____no_output_____
MIT-0
getting_started/2.View_Campaign_And_Interactions.ipynb
lmorri/personalize-movielens-20m
Creating an Event Tracker
Before your recommendation system can respond to real-time events you will need an event tracker; the code below will generate one, and it can be used going forward in this lab. Feel free to name it something more clever.
response = personalize.create_event_tracker( name='MovieClickTracker', datasetGroupArn=datasetGroupArn ) print(response['eventTrackerArn']) print(response['trackingId']) TRACKING_ID = response['trackingId']
arn:aws:personalize:us-east-1:930444659029:event-tracker/bbe80586 b8a5944c-8095-40ff-a915-2a6af53b7f55
MIT-0
getting_started/2.View_Campaign_And_Interactions.ipynb
lmorri/personalize-movielens-20m
Configuring Source Data
Above you'll see your tracking ID; it has been assigned to a variable, so no further action is needed on your part. The lines below set up the data used for recommendations so you can render the list of movies later.
data = pd.read_csv('./ml-20m/ratings.csv', sep=',', dtype={'userid': "int64", 'movieid': "int64", 'rating': "float64", 'timestamp': "int64"})
pd.set_option('display.max_rows', 5)
data.rename(columns = {'userId':'USER_ID','movieId':'ITEM_ID','rating':'RATING','timestamp':'TIMESTAMP'}, inplace = True)
data = data[data['RATING'] > 3]                  # keep only movies rated higher than 3
data = data[['USER_ID', 'ITEM_ID', 'TIMESTAMP']] # select columns that match the columns in the schema below
data

items = pd.read_csv('./ml-20m/movies.csv', sep=',', usecols=[0,1], header=0)
items.columns = ['ITEM_ID', 'TITLE']

user_id, item_id, _ = data.sample().values[0]
item_title = items.loc[items['ITEM_ID'] == item_id].values[0][-1]
print("USER: {}".format(user_id))
print("ITEM: {}".format(item_title))

items
USER: 40094 ITEM: Hotel Rwanda (2004)
MIT-0
getting_started/2.View_Campaign_And_Interactions.ipynb
lmorri/personalize-movielens-20m
Getting Recommendations
Just like in the previous notebook, it is a great idea to get a list of recommendations first and then see how additional behavior by a user alters the recommendations.
# Get Recommendations as is get_recommendations_response = personalize_runtime.get_recommendations( campaignArn = HRNN_Campaign_ARN, userId = USER_ID, ) item_list = get_recommendations_response['itemList'] title_list = [items.loc[items['ITEM_ID'] == np.int(item['itemId'])].values[0][-1] for item in item_list] print("Recommendations: {}".format(json.dumps(title_list, indent=2))) print(item_list)
Recommendations: [ "Signs (2002)", "Panic Room (2002)", "Vanilla Sky (2001)", "American Pie 2 (2001)", "Blade II (2002)", "Bourne Identity, The (2002)", "Star Wars: Episode II - Attack of the Clones (2002)", "Memento (2000)", "Fast and the Furious, The (2001)", "Unbreakable (2000)", "Snatch (2000)", "Austin Powers in Goldmember (2002)", "Resident Evil (2002)", "xXx (2002)", "Sum of All Fears, The (2002)", "Others, The (2001)", "American Beauty (1999)", "Pulp Fiction (1994)", "Spider-Man (2002)", "Minority Report (2002)", "Rock, The (1996)", "Ring, The (2002)", "Black Hawk Down (2001)", "Ocean's Eleven (2001)", "Schindler's List (1993)" ] [{'itemId': '5502'}, {'itemId': '5266'}, {'itemId': '4975'}, {'itemId': '4718'}, {'itemId': '5254'}, {'itemId': '5418'}, {'itemId': '5378'}, {'itemId': '4226'}, {'itemId': '4369'}, {'itemId': '3994'}, {'itemId': '4011'}, {'itemId': '5481'}, {'itemId': '5219'}, {'itemId': '5507'}, {'itemId': '5400'}, {'itemId': '4720'}, {'itemId': '2858'}, {'itemId': '296'}, {'itemId': '5349'}, {'itemId': '5445'}, {'itemId': '733'}, {'itemId': '5679'}, {'itemId': '5010'}, {'itemId': '4963'}, {'itemId': '527'}]
MIT-0
getting_started/2.View_Campaign_And_Interactions.ipynb
lmorri/personalize-movielens-20m
Simulating User Behavior
The lines below provide a code sample that simulates a user interacting with a particular item; you will then get recommendations that differ from those you started with.
session_dict = {}

def send_movie_click(USER_ID, ITEM_ID):
    """
    Simulates a click and sends it as an event to Amazon Personalize's Event Tracker
    """
    # Configure Session
    try:
        session_ID = session_dict[USER_ID]
    except:
        session_dict[USER_ID] = str(uuid.uuid1())
        session_ID = session_dict[USER_ID]

    # Configure Properties:
    event = {
        "itemId": str(ITEM_ID),
    }
    event_json = json.dumps(event)

    # Make Call
    personalize_events.put_events(
        trackingId = TRACKING_ID,
        userId= USER_ID,
        sessionId = session_ID,
        eventList = [{
            'sentAt': int(time.time()),
            'eventType': 'EVENT_TYPE',
            'properties': event_json
        }]
    )
_____no_output_____
MIT-0
getting_started/2.View_Campaign_And_Interactions.ipynb
lmorri/personalize-movielens-20m
The cell immediately below updates the tracker as if the user had clicked a particular title.
# Pick a movie, we will use ID 1653, which is Gattaca
send_movie_click(USER_ID=USER_ID, ITEM_ID=1653)
_____no_output_____
MIT-0
getting_started/2.View_Campaign_And_Interactions.ipynb
lmorri/personalize-movielens-20m
After executing this block you will see how the recommendations have changed now that event tracking is enabled and the click events have been sent to the service.
get_recommendations_response = personalize_runtime.get_recommendations( campaignArn = HRNN_Campaign_ARN, userId = str(USER_ID), ) item_list = get_recommendations_response['itemList'] title_list = [items.loc[items['ITEM_ID'] == np.int(item['itemId'])].values[0][-1] for item in item_list] print("Recommendations: {}".format(json.dumps(title_list, indent=2))) print(item_list)
Recommendations: [ "Signs (2002)", "Fifth Element, The (1997)", "Gattaca (1997)", "Unbreakable (2000)", "Face/Off (1997)", "Predator (1987)", "Dark City (1998)", "Star Wars: Episode II - Attack of the Clones (2002)", "Cube (1997)", "Spider-Man (2002)", "Game, The (1997)", "Minority Report (2002)", "X-Files: Fight the Future, The (1998)", "Twelve Monkeys (a.k.a. 12 Monkeys) (1995)", "Rock, The (1996)", "Vanilla Sky (2001)", "Starship Troopers (1997)", "Bourne Identity, The (2002)", "Sneakers (1992)", "American Beauty (1999)", "Austin Powers in Goldmember (2002)", "Memento (2000)", "Pulp Fiction (1994)", "X-Men (2000)", "Star Wars: Episode I - The Phantom Menace (1999)" ] [{'itemId': '5502'}, {'itemId': '1527'}, {'itemId': '1653'}, {'itemId': '3994'}, {'itemId': '1573'}, {'itemId': '3527'}, {'itemId': '1748'}, {'itemId': '5378'}, {'itemId': '2232'}, {'itemId': '5349'}, {'itemId': '1625'}, {'itemId': '5445'}, {'itemId': '1909'}, {'itemId': '32'}, {'itemId': '733'}, {'itemId': '4975'}, {'itemId': '1676'}, {'itemId': '5418'}, {'itemId': '1396'}, {'itemId': '2858'}, {'itemId': '5481'}, {'itemId': '4226'}, {'itemId': '296'}, {'itemId': '3793'}, {'itemId': '2628'}]
MIT-0
getting_started/2.View_Campaign_And_Interactions.ipynb
lmorri/personalize-movielens-20m
Conclusion
You can see now that recommendations are altered by changing the movie that a user interacts with; this system can be adapted to any application where users interact with a collection of items. These tools are available at any time to pull down and start exploring what is possible with the data you have. Finally, when you are ready to remove the items from your account, open the `Cleanup.ipynb` notebook and execute the steps there.
eventTrackerArn = response['eventTrackerArn'] print("Tracker ARN is: " + str(eventTrackerArn))
_____no_output_____
MIT-0
getting_started/2.View_Campaign_And_Interactions.ipynb
lmorri/personalize-movielens-20m
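If you would rather remove the event tracker right away instead of waiting for the cleanup notebook, a minimal sketch (assuming the `personalize` client and the `eventTrackerArn` printed above):

```python
# Delete the event tracker created earlier in this notebook
personalize.delete_event_tracker(eventTrackerArn=eventTrackerArn)
```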
* [Sudoku Scraper](https://github.com/apauliuc/sudoku-scraper)
* [Scraped Website](https://www.menneske.no/sudoku/3x4/eng/)
import pandas as pd from google.colab import files # upload the uncleaned 12x12 data uploaded = files.upload() # loading and looking at the data df = pd.read_csv("uncleaned_sudokus_12x12.csv") print(df.shape) df.head() # cleaning dataset clean_df = df.copy() clean_df.head() # replace # '0' with '.' # '10' with 'A' # '11' with 'B' # '12' with 'C' clean_df['puzzle'] = clean_df['puzzle'].replace(r'0', '.', regex = True) clean_df = clean_df.replace(r'10', 'A', regex = True) clean_df = clean_df.replace(r'11', 'B', regex = True) clean_df = clean_df.replace(r'12', 'C', regex = True) clean_df.head() # remove spaces clean_df = clean_df.replace(r' ', '', regex = True) clean_df.head() # making a 'level', 'gridLength', 'row', and 'col' column clean_df['level'] = 'Hard' clean_df['gridLength'] = 12 clean_df['row'] = 3 clean_df['col'] = 4 clean_df.head() # rename 'puzzle' to 'sudoku' clean_df.rename(columns = {'puzzle': 'sudoku'}, inplace = True) clean_df.head() # download the clean csv clean_df.to_csv('12x12_puzzles.csv') files.download('12x12_puzzles.csv')
_____no_output_____
MIT
Notebooks/data/12x12_puzzles.ipynb
Lambda-School-Labs/omega2020-ds
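A small sanity check that is not in the original notebook: for a 12x12 grid, each cleaned puzzle string should be exactly 144 characters drawn from '.', '1'-'9', 'A', 'B', 'C' (an assumption based on the encoding described above). It only assumes the `clean_df` frame from the cells above.

```python
# Verify puzzle length and alphabet after cleaning
lengths_ok = clean_df['sudoku'].str.len().eq(144).all()
alphabet_ok = clean_df['sudoku'].str.match(r'^[.1-9ABC]{144}$').all()
print('all puzzles are 144 characters:', lengths_ok)
print('all puzzles use only . 1-9 A B C:', alphabet_ok)
```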
Import the Libraries
import os import warnings warnings.filterwarnings('ignore') # importing packages import pandas as pd import re import numpy as np import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline # sklearn packages from sklearn import metrics from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score, StratifiedKFold, RandomizedSearchCV from sklearn.pipeline import Pipeline from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier from lightgbm import LGBMClassifier from sklearn.metrics import roc_auc_score, roc_curve from sklearn.model_selection import KFold, StratifiedKFold import gc from sklearn.model_selection import StratifiedKFold plt.style.use("seaborn") %matplotlib inline plt.rcParams['figure.figsize'] = (10,8)
_____no_output_____
MIT
IndependenceDay_Hack_LightGBM.ipynb
Niranjankumar-c/IndiaML_Hiring_Hackathon_2019
Loading the data
#load the train and test data totaldf_onehot = pd.read_csv("totaldata_onehot.csv") #load the train data totaldf_onehot.head() #split the data into train and test traindf_cleaned = totaldf_onehot[totaldf_onehot["source"] == "train"].drop("source", axis = 1) testdf_cleaned = totaldf_onehot[totaldf_onehot["source"] == "test"].drop(["source", "m13"], axis = 1) traindf_cleaned.head() testdf_cleaned.head() submission_df = pd.read_csv("data/sample_submission.csv") submission_df.head() def kfold_lightgbm(train_df, test_df, submission_df,num_folds=3, stratified = True): dt_preds = {} print("Starting LightGBM. Train shape: {}, test shape: {}".format(train_df.shape, test_df.shape)) # Cross validation model if stratified: folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=1001) else: folds = KFold(n_splits= num_folds, shuffle=True, random_state=1001) # Create arrays and dataframes to store results oof_preds = np.zeros(train_df.shape[0]) sub_preds = np.zeros(test_df.shape[0]) feature_importance_df = pd.DataFrame() feats = [f for f in train_df.columns if f not in ["m13"]] print(feats) for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df, train_df['m13'])): train_x, train_y = train_df[feats].iloc[train_idx], train_df['m13'].iloc[train_idx] valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['m13'].iloc[valid_idx] # LightGBM parameters found by Bayesian optimization clf = LGBMClassifier( nthread=4, n_estimators=10000, learning_rate=0.02, num_leaves=34, colsample_bytree=0.9497036, subsample=0.8715623, max_depth=8, reg_alpha=0.041545473, reg_lambda=0.0735294, min_split_gain=0.0222415, min_child_weight=39.3259775, silent=-1, verbose=-1, ) clf.fit(train_x, train_y, eval_set=[(train_x, train_y), (valid_x, valid_y)], eval_metric= 'f1', verbose= 200, early_stopping_rounds= 200) oof_preds[valid_idx] = clf.predict_proba(valid_x, num_iteration=clf.best_iteration_)[:, 1] sub_preds += clf.predict_proba(test_df[feats], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits dt_preds[n_fold + 1] = clf.predict(valid_x) fold_importance_df = pd.DataFrame() fold_importance_df["feature"] = feats fold_importance_df["importance"] = clf.feature_importances_ fold_importance_df["fold"] = n_fold + 1 feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0) print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_idx]))) print('Fold %2d F1 : %.6f' % (n_fold + 1, metrics.f1_score(valid_y, dt_preds[n_fold + 1]))) del clf, train_x, train_y, valid_x, valid_y gc.collect() print('Full AUC score %.6f' % roc_auc_score(train_df['m13'], oof_preds)) # Write submission file and plot feature importance display_importances(feature_importance_df) return feature_importance_df, dt_preds # Display/plot feature importance def display_importances(feature_importance_df_): cols = feature_importance_df_[["feature", "importance"]].groupby("feature").mean().sort_values(by="importance", ascending=False)[:40].index best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)] plt.figure(figsize=(8, 10)) sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False)) plt.title('LightGBM Features (avg over folds)') plt.tight_layout() plt.show() #plt.savefig('lgbm_importances01.png') feature_df, preds = kfold_lightgbm(traindf_cleaned, testdf_cleaned, submission_df, 3, True) pd.Series(preds).map( lambda x: 1 if x >= 0.2 else 0 ).value_counts() preds.value_counts()
_____no_output_____
MIT
IndependenceDay_Hack_LightGBM.ipynb
Niranjankumar-c/IndiaML_Hiring_Hackathon_2019
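The notebook stops before producing a submission file. Below is a hedged sketch of one way to finish it, assuming `kfold_lightgbm` is adapted to also return `sub_preds` (the fold-averaged test-set probabilities it already computes internally) and that the sample submission expects a binary `m13` column - both are assumptions, not shown in the original code.

```python
# Sketch only: `sub_preds` and the 'm13' column name are assumptions (see above).
threshold = 0.2
submission_df['m13'] = (sub_preds >= threshold).astype(int)
submission_df.to_csv('submission_lgbm.csv', index=False)
submission_df['m13'].value_counts()
```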
Collaborative filtering example
`collab` models use data in a `DataFrame` of users, items, and ratings.
path = untar_data(URLs.ML_SAMPLE) path ratings = pd.read_csv(path/'ratings.csv') series2cat(ratings, 'userId', 'movieId') ratings.head() data = CollabDataBunch.from_df(ratings, seed=42) y_range = [0, 5.5]
_____no_output_____
Apache-2.0
examples/collab.ipynb
MartinGer/fastai
That's all we need to create and train a model:
learn = collab_learner(data, n_factors=50, y_range=y_range) learn.fit_one_cycle(4, 5e-3)
Total time: 00:02 epoch train_loss valid_loss 1 1.724424 1.277289 (00:00) 2 0.893744 0.678392 (00:00) 3 0.655527 0.651847 (00:00) 4 0.562305 0.649613 (00:00)
Apache-2.0
examples/collab.ipynb
MartinGer/fastai
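Not shown in the original example: once trained, you can pull predicted ratings for the validation set. A minimal sketch, assuming the fastai v1 `Learner` API used above (where `get_preds` defaults to the validation set):

```python
# Predicted ratings vs. targets on the validation set
preds, targets = learn.get_preds()
print(preds[:5].squeeze())
print(targets[:5])
```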
📝 Exercise 00
The goal of this exercise is to fit a model similar to the one in the previous notebook, to get familiar with manipulating scikit-learn objects and in particular the `.fit/.predict/.score` API.
Let's load the adult census dataset with only numerical variables
import pandas as pd adult_census = pd.read_csv("../datasets/adult-census-numeric.csv") data = adult_census.drop(columns="class") target = adult_census["class"]
_____no_output_____
CC-BY-4.0
notebooks/02_numerical_pipeline_ex_00.ipynb
khanfarhan10/scikit-learn-mooc
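One possible solution sketch (an addition; the choice of `KNeighborsClassifier` is an assumption mirroring the model from the previous notebook - any estimator exposing `.fit/.predict/.score` works the same way):

```python
from sklearn.neighbors import KNeighborsClassifier

# Fit the model on the numerical features and target loaded above
model = KNeighborsClassifier()
model.fit(data, target)

# Predict on the first few samples, then compute the accuracy on the training data
print(model.predict(data[:5]))
print(f"Training accuracy: {model.score(data, target):.3f}")
```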