Columns: markdown (0–1.02M chars), code (0–832k), output (0–1.02M), license (3–36), path (6–265), repo_name (6–127)
Read name frequencies
# TODO rewrite this using pandas too
def load_name_freqs(path, is_surname):
    name_freqs = defaultdict(int)
    with fopen(path, mode="r", encoding="utf-8") as f:
        for line in f:
            fields = line.rstrip().split("\t")
            for name_piece in normalize(fields[0], is_surname):
                name_freqs[name_piece] = int(fields[1])
    return name_freqs

name_freqs = load_name_freqs(name_freqs_filename, is_surname)
# keep only entries in all_names
name_freqs = dict((add_padding(k), v) for k, v in name_freqs.items() if add_padding(k) in all_names)
print(len(name_freqs), next(iter(name_freqs.items())))
_____no_output_____
MIT
reports/80_cluster_anc_triplet-initial.ipynb
rootsdev/nama
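The TODO above asks for a pandas rewrite. Here is a minimal sketch of what that could look like; it assumes the same tab-separated "name&lt;TAB&gt;frequency" file layout and the existing `normalize()` helper, and the function name is made up for illustration.

```python
# Hypothetical pandas-based variant of load_name_freqs (not part of the notebook).
from collections import defaultdict
import pandas as pd

def load_name_freqs_pandas(path, is_surname):
    df = pd.read_csv(path, sep="\t", header=None, names=["name", "freq"], encoding="utf-8")
    name_freqs = defaultdict(int)
    for name, freq in zip(df["name"], df["freq"]):
        for name_piece in normalize(name, is_surname):  # assumes the notebook's normalize()
            name_freqs[name_piece] = int(freq)
    return name_freqs
```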
Load model
model = torch.load(model_filename)
_____no_output_____
MIT
reports/80_cluster_anc_triplet-initial.ipynb
rootsdev/nama
Encode names
MAX_NAME_LENGTH = 30

char_to_idx_map, idx_to_char_map = build_token_idx_maps()
_____no_output_____
MIT
reports/80_cluster_anc_triplet-initial.ipynb
rootsdev/nama
Take a sample because encoded names require a lot of memory
if sample_size <= 0 or sample_size >= len(all_names):
    names_sample = np.array(list(all_names))
else:
    names_sample = np.array(random.sample(all_names, sample_size))
print(names_sample.shape)
_____no_output_____
MIT
reports/80_cluster_anc_triplet-initial.ipynb
rootsdev/nama
Compute encodings
# Get embeddings
names_tensor, _ = convert_names_to_model_inputs(names_sample, char_to_idx_map, MAX_NAME_LENGTH)

# Get encodings for the names from the encoder
# TODO why do I need to encode in chunks?
chunk_size = 10000
nps = []
for begin in tqdm(range(0, len(names_tensor), chunk_size)):
    nps.append(model(names_tensor[begin:begin+chunk_size], just_encoder=True).detach().numpy())
names_encoded = np.concatenate(nps, axis=0)
nps = None

names_encoded.shape
_____no_output_____
MIT
reports/80_cluster_anc_triplet-initial.ipynb
rootsdev/nama
Compute distances
name_candidates = get_best_matches(names_encoded, names_encoded, names_sample,
                                   num_candidates=num_candidates, metric='euclidean')

# flatten the (candidate, distance) pairs into one row per (name, candidate, distance)
distances = np.hstack((np.repeat(names_sample, num_candidates)[:, np.newaxis],
                       name_candidates.reshape(-1, 2)))
# remove distances > max_distance
distances = distances[distances[:, -1].astype('float') <= max_distance]
# sort by distance
distances = distances[distances[:, -1].astype('float').argsort()]

print(distances.shape)
name_candidates = None
_____no_output_____
MIT
reports/80_cluster_anc_triplet-initial.ipynb
rootsdev/nama
Compute closures
# iterate over all distances, create closures and save scores
next_closure = 0
closure_ids = {}
id_closure = {}
row_ixs = []
col_ixs = []
dists = []
max_size = 0

for row in tqdm(distances):
    name1 = row[0]
    name2 = row[1]
    id1 = name_ids[name1]
    id2 = name_ids[name2]
    # each pair appears in distances twice (a->b and b->a), so only process it once
    if id1 > id2:
        continue
    distance = max(eps, float(row[2]))
    closure1 = id_closure.get(id1)
    closure2 = id_closure.get(id2)
    if closure1 is None and closure2 is not None:
        id1, id2 = id2, id1
        name1, name2 = name2, name1
        closure1, closure2 = closure2, closure1
    # add to distance matrix
    row_ixs.append(id1)
    col_ixs.append(id2)
    dists.append(distance)
    # skip if names are the same
    if id1 == id2:
        continue
    row_ixs.append(id2)
    col_ixs.append(id1)
    dists.append(distance)
    # create closures
    if closure1 is None:
        # if closure1 is None, then closure2 must be None also (guaranteed by the swap above),
        # so create a new closure with id1 and id2
        closure1 = next_closure
        next_closure += 1
        id_closure[id1] = closure1
        id_closure[id2] = closure1
        closure_ids[closure1] = [id1, id2]
    elif closure2 is None:
        # put id2 into id1's closure
        id_closure[id2] = closure1
        closure_ids[closure1].append(id2)
    elif closure1 != closure2 and len(closure_ids[closure1]) + len(closure_ids[closure2]) <= max_closure_size:
        # move all ids in closure2 into closure1
        for id in closure_ids[closure2]:
            id_closure[id] = closure1
            closure_ids[closure1].append(id)
        del closure_ids[closure2]
    if len(closure_ids[closure1]) > max_size:
        max_size = len(closure_ids[closure1])

# create distances matrix
dist_matrix = csr_matrix((dists, (row_ixs, col_ixs)))

print("max closure_size", max_size)
print("number of closures", len(closure_ids), "number of names enclosed", len(id_closure))
_____no_output_____
MIT
reports/80_cluster_anc_triplet-initial.ipynb
rootsdev/nama
Compute clusters
def compute_clusters(closure_ids, id_names, dist_matrix, linkage, distance_threshold, eps, max_dist):
    cluster_names = defaultdict(set)
    name_cluster = {}
    for closure, ids in tqdm(closure_ids.items()):
        clusterer = AgglomerativeClustering(n_clusters=None, affinity='precomputed', linkage=linkage,
                                            distance_threshold=distance_threshold)
        X = dist_matrix[ids][:, ids].todense()
        X[X < eps] = max_dist
        labels = clusterer.fit_predict(X)
        for id, label in zip(ids, labels):
            name = id_names[id]
            cluster = f'{closure}_{label}'
            cluster_names[cluster].add(name)
            name_cluster[name] = cluster
    return cluster_names, name_cluster

# try ward, average, single
cluster_linkage = 'average'
max_dist = 10.0

cluster_names, name_cluster = compute_clusters(closure_ids, id_names, dist_matrix,
                                               cluster_linkage, cluster_distance_threshold, eps, max_dist)
print(len(cluster_names))
_____no_output_____
MIT
reports/80_cluster_anc_triplet-initial.ipynb
rootsdev/nama
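As a side illustration of how the precomputed-distance clustering above behaves, here is a tiny self-contained sketch (not part of the notebook) on a made-up 4x4 distance matrix: pairs closer than the `distance_threshold` end up in the same cluster.

```python
# Toy example of AgglomerativeClustering with a precomputed distance matrix.
import numpy as np
from sklearn.cluster import AgglomerativeClustering

toy_dist = np.array([
    [0.0, 0.2, 5.0, 5.0],
    [0.2, 0.0, 5.0, 5.0],
    [5.0, 5.0, 0.0, 0.3],
    [5.0, 5.0, 0.3, 0.0],
])
toy_clusterer = AgglomerativeClustering(n_clusters=None, affinity='precomputed',
                                        linkage='average', distance_threshold=1.0)
print(toy_clusterer.fit_predict(toy_dist))  # expect two clusters, e.g. [0 0 1 1] (label order may vary)
```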
Add unclustered names as singleton clusters
def add_singleton_names(cluster_names, name_cluster, names_sample):
    for ix, name in enumerate(names_sample):
        if name not in name_cluster:
            cluster = f'{ix}'
            cluster_names[cluster].add(name)
            name_cluster[name] = cluster
    return cluster_names, name_cluster

cluster_names, name_cluster = add_singleton_names(cluster_names, name_cluster, names_sample)
print(len(cluster_names))
_____no_output_____
MIT
reports/80_cluster_anc_triplet-initial.ipynb
rootsdev/nama
Eval cluster P/R over Ancestry test data
train, test = load_train_test("../data/raw/records25k_data_train.csv", "../data/raw/records25k_data_test.csv")

_, _, candidates_train = train
input_names_test, weighted_relevant_names_test, candidates_test = test

all_candidates = np.concatenate((candidates_train, candidates_test))

def get_precision_recall(names_sample, all_candidates, input_names_test, weighted_relevant_names_test,
                         cluster_names, name_cluster):
    names_sample_set = set(names_sample.tolist())
    all_candidates_set = set(all_candidates.tolist())
    precisions = []
    recalls = []
    for input_name, weighted_relevant_names in zip(input_names_test, weighted_relevant_names_test):
        if input_name not in names_sample_set:
            continue
        cluster_id = name_cluster[input_name]
        names_in_cluster = cluster_names[cluster_id] & all_candidates_set
        found_recall = 0.0
        total_recall = 0.0
        found_count = 0
        for name, weight, _ in weighted_relevant_names:
            if name in names_sample_set:
                total_recall += weight
                if name in names_in_cluster:
                    found_recall += weight
                    found_count += 1
        if total_recall == 0.0:
            continue
        precision = found_count / len(names_in_cluster) if len(names_in_cluster) > 0 else 1.0
        recall = found_recall / total_recall
        precisions.append(precision)
        recalls.append(recall)
    avg_precision = sum(precisions) / len(precisions)
    avg_recall = sum(recalls) / len(recalls)
    return avg_precision, avg_recall, len(precisions)

precision, recall, total = get_precision_recall(names_sample, all_candidates, input_names_test,
                                                weighted_relevant_names_test, cluster_names, name_cluster)
print("Total=", total, " Precision=", precision, " Recall=", recall)
_____no_output_____
MIT
reports/80_cluster_anc_triplet-initial.ipynb
rootsdev/nama
Write clusters
def write_clusters(path, cluster_names, name_freqs, name_nicks):
    cluster_id_name_map = {}
    with fopen(path, mode="w", encoding="utf-8") as f:
        for cluster_id, names in cluster_names.items():
            # get most-frequent name
            cluster_name = max(names, key=(lambda name: name_freqs.get(name, 0)))
            # map cluster id to cluster name
            cluster_id_name_map[cluster_id] = cluster_name
            # add nicknames
            nicknames = set()
            if name_nicks:
                for name in names:
                    if name in name_nicks:
                        nicknames.update(name_nicks[name])
            # remove padding
            cluster_name = remove_padding(cluster_name)
            names = [remove_padding(name) for name in names | nicknames]
            # write cluster
            f.write(f'{cluster_name}\t{" ".join(names)}\n')
    return cluster_id_name_map

cluster_id_name_map = write_clusters(clusters_filename, cluster_names, name_freqs, name_nicks)
_____no_output_____
MIT
reports/80_cluster_anc_triplet-initial.ipynb
rootsdev/nama
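For reference, a small companion sketch (not in the original notebook) showing how the file written above could be read back into a `{cluster_name: [names]}` dict; it assumes the same tab-separated "cluster_name&lt;TAB&gt;space-separated names" layout and reuses the notebook's `fopen` helper.

```python
# Hypothetical reader for the clusters file produced by write_clusters.
def read_clusters(path):
    clusters = {}
    with fopen(path, mode="r", encoding="utf-8") as f:
        for line in f:
            cluster_name, names = line.rstrip("\n").split("\t")
            clusters[cluster_name] = names.split(" ")
    return clusters
```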
Create super-clusters
super_cluster_names, name_super_cluster = compute_clusters(closure_ids, id_names, dist_matrix,
                                                           cluster_linkage, super_cluster_distance_threshold,
                                                           eps, max_dist)
print(len(super_cluster_names))

super_cluster_names, name_super_cluster = add_singleton_names(super_cluster_names, name_super_cluster, names_sample)
print(len(super_cluster_names))

precision, recall, total = get_precision_recall(names_sample, all_candidates, input_names_test,
                                                weighted_relevant_names_test, super_cluster_names, name_super_cluster)
print("Total=", total, " Precision=", precision, " Recall=", recall)

# get cluster names for each name in super cluster
super_cluster_clusters = {id: set([cluster_id_name_map[name_cluster[name]] for name in names])
                          for id, names in super_cluster_names.items()}
_____no_output_____
MIT
reports/80_cluster_anc_triplet-initial.ipynb
rootsdev/nama
Write super-clusters
_ = write_clusters(super_clusters_filename, super_cluster_clusters, name_freqs, None)
_____no_output_____
MIT
reports/80_cluster_anc_triplet-initial.ipynb
rootsdev/nama
This notebook gives a 30-second introduction to the Vortexa SDK. First, let's import our requirements.
from datetime import datetime

import vortexasdk as v
_____no_output_____
Apache-2.0
docs/examples/try_me_out/voyages_congestion_breakdown.ipynb
V0RT3X4/python-sdk
Now let's load a dataframe summarizing vessels in congestion. You'll need to enter your Vortexa API key when prompted.
df = v.VoyagesCongestionBreakdown()\
    .search(
        time_min=datetime(2021, 8, 1, 0),
        time_max=datetime(2021, 8, 1, 23))\
    .to_df()

df.head()
_____no_output_____
Apache-2.0
docs/examples/try_me_out/voyages_congestion_breakdown.ipynb
V0RT3X4/python-sdk
Copyright 2019 The TensorFlow Authors.
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Load images with tf.data This tutorial provides a simple example of how to load an image dataset using `tf.data`. The dataset used in this example is distributed as directories of images, with one class of image per directory. Setup
from __future__ import absolute_import, division, print_function, unicode_literals

!pip install tensorflow==2.0.0-beta1

import tensorflow as tf

AUTOTUNE = tf.data.experimental.AUTOTUNE
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Download and inspect the dataset Retrieve the images Before you start any training, you will need a set of images to teach the network about the new classes you want to recognize. An archive of creative-commons licensed flower photos is available to use initially:
import pathlib

data_root_orig = tf.keras.utils.get_file(
    origin='https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
    fname='flower_photos', untar=True)
data_root = pathlib.Path(data_root_orig)
print(data_root)
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
After downloading 218MB, you should now have a copy of the flower photos available:
for item in data_root.iterdir():
    print(item)

import random
all_image_paths = list(data_root.glob('*/*'))
all_image_paths = [str(path) for path in all_image_paths]
random.shuffle(all_image_paths)

image_count = len(all_image_paths)
image_count

all_image_paths[:10]
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Inspect the images Now let's have a quick look at a couple of the images, so you know what you are dealing with:
import os

attributions = (data_root/"LICENSE.txt").open(encoding='utf-8').readlines()[4:]
attributions = [line.split(' CC-BY') for line in attributions]
attributions = dict(attributions)

import IPython.display as display

def caption_image(image_path):
    image_rel = pathlib.Path(image_path).relative_to(data_root)
    return "Image (CC BY 2.0) " + ' - '.join(attributions[str(image_rel)].split(' - ')[:-1])

for n in range(3):
    image_path = random.choice(all_image_paths)
    display.display(display.Image(image_path))
    print(caption_image(image_path))
    print()
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Determine the label for each image List the available labels:
label_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir())
label_names
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Assign an index to each label:
label_to_index = dict((name, index) for index, name in enumerate(label_names))
label_to_index
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Create a list of every file, and its label index:
all_image_labels = [label_to_index[pathlib.Path(path).parent.name]
                    for path in all_image_paths]

print("First 10 labels indices: ", all_image_labels[:10])
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Load and format the images TensorFlow includes all the tools you need to load and process images:
img_path = all_image_paths[0]
img_path
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Here is the raw data:
img_raw = tf.io.read_file(img_path)
print(repr(img_raw)[:100]+"...")
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Decode it into an image tensor:
img_tensor = tf.image.decode_image(img_raw)

print(img_tensor.shape)
print(img_tensor.dtype)
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Resize it for your model:
img_final = tf.image.resize(img_tensor, [192, 192])
img_final = img_final/255.0

print(img_final.shape)
print(img_final.numpy().min())
print(img_final.numpy().max())
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Wrap these up in simple functions for later.
def preprocess_image(image):
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.resize(image, [192, 192])
    image /= 255.0  # normalize to [0,1] range
    return image

def load_and_preprocess_image(path):
    image = tf.io.read_file(path)
    return preprocess_image(image)

import matplotlib.pyplot as plt

image_path = all_image_paths[0]
label = all_image_labels[0]

plt.imshow(load_and_preprocess_image(image_path))
plt.grid(False)
plt.xlabel(caption_image(image_path))
plt.title(label_names[label].title())
print()
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Build a `tf.data.Dataset` A dataset of images The easiest way to build a `tf.data.Dataset` is using the `from_tensor_slices` method. Slicing the array of strings results in a dataset of strings:
path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
The `shapes` and `types` describe the content of each item in the dataset. In this case it is a set of scalar binary strings.
print(path_ds)
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Now create a new dataset that loads and formats images on the fly by mapping `preprocess_image` over the dataset of paths.
image_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)

import matplotlib.pyplot as plt

plt.figure(figsize=(8,8))
for n, image in enumerate(image_ds.take(4)):
    plt.subplot(2,2,n+1)
    plt.imshow(image)
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.xlabel(caption_image(all_image_paths[n]))
plt.show()
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
A dataset of `(image, label)` pairs Using the same `from_tensor_slices` method you can build a dataset of labels:
label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(all_image_labels, tf.int64))

for label in label_ds.take(10):
    print(label_names[label.numpy()])
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Since the datasets are in the same order you can just zip them together to get a dataset of `(image, label)` pairs:
image_label_ds = tf.data.Dataset.zip((image_ds, label_ds))
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
The new dataset's `shapes` and `types` are tuples of shapes and types as well, describing each field:
print(image_label_ds)
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Note: When you have arrays like `all_image_labels` and `all_image_paths`, an alternative to `tf.data.Dataset.zip` is to slice the pair of arrays.
ds = tf.data.Dataset.from_tensor_slices((all_image_paths, all_image_labels))

# The tuples are unpacked into the positional arguments of the mapped function
def load_and_preprocess_from_path_label(path, label):
    return load_and_preprocess_image(path), label

image_label_ds = ds.map(load_and_preprocess_from_path_label)
image_label_ds
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Basic methods for training To train a model with this dataset you will want the data:

* To be well shuffled.
* To be batched.
* To repeat forever.
* Batches to be available as soon as possible.

These features can be easily added using the `tf.data` API.
BATCH_SIZE = 32

# Setting a shuffle buffer size as large as the dataset ensures that the data is
# completely shuffled.
ds = image_label_ds.shuffle(buffer_size=image_count)
ds = ds.repeat()
ds = ds.batch(BATCH_SIZE)
# `prefetch` lets the dataset fetch batches in the background while the model is training.
ds = ds.prefetch(buffer_size=AUTOTUNE)
ds
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
There are a few things to note here:

1. The order is important.
   * A `.shuffle` after a `.repeat` would shuffle items across epoch boundaries (some items will be seen twice before others are seen at all).
   * A `.shuffle` after a `.batch` would shuffle the order of the batches, but not shuffle the items across batches.
2. You use a `buffer_size` the same size as the dataset for a full shuffle. Up to the dataset size, large values provide better randomization, but use more memory.
3. The shuffle buffer is filled before any elements are pulled from it, so a large `buffer_size` may cause a delay when your `Dataset` is starting.
4. The shuffled dataset doesn't report the end of a dataset until the shuffle buffer is completely empty. The `Dataset` is restarted by `.repeat`, causing another wait for the shuffle buffer to be filled.

This last point, as well as the first, can be addressed by using the `tf.data.Dataset.apply` method with the fused `tf.data.experimental.shuffle_and_repeat` function (a small illustration of the ordering effect follows the next cell):
ds = image_label_ds.apply(
    tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE)
ds = ds.prefetch(buffer_size=AUTOTUNE)
ds
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
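To see the ordering effect from point 1 concretely, here is a tiny sketch (not part of the original tutorial) on a 5-element range dataset; the buffer size covers the whole toy dataset, so each epoch of the first pipeline is a full permutation.

```python
# Toy comparison of shuffle-then-repeat vs. repeat-then-shuffle.
toy = tf.data.Dataset.range(5)

shuffle_then_repeat = toy.shuffle(5).repeat(2)   # each epoch is a full permutation of 0..4
repeat_then_shuffle = toy.repeat(2).shuffle(5)   # items can cross epoch boundaries

print([int(x) for x in shuffle_then_repeat])
print([int(x) for x in repeat_then_shuffle])
```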
Pipe the dataset to a model Fetch a copy of MobileNet v2 from `tf.keras.applications`. This will be used for a simple transfer learning example. Set the MobileNet weights to be non-trainable:
mobile_net = tf.keras.applications.MobileNetV2(input_shape=(192, 192, 3), include_top=False)
mobile_net.trainable = False
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
This model expects its input to be normalized to the `[-1,1]` range: ```help(keras_applications.mobilenet_v2.preprocess_input)``` ...This function applies the "Inception" preprocessing which converts the RGB values from [0, 255] to [-1, 1]... Before you pass the input to the MobileNet model, you need to convert it from a range of `[0,1]` to `[-1,1]`:
def change_range(image, label):
    return 2*image-1, label

keras_ds = ds.map(change_range)
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
The MobileNet returns a `6x6` spatial grid of features for each image. Pass it a batch of images to see:
# The dataset may take a few seconds to start, as it fills its shuffle buffer.
image_batch, label_batch = next(iter(keras_ds))

feature_map_batch = mobile_net(image_batch)
print(feature_map_batch.shape)
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Build a model wrapped around MobileNet and use `tf.keras.layers.GlobalAveragePooling2D` to average over the spatial dimensions before the output `tf.keras.layers.Dense` layer:
model = tf.keras.Sequential([
    mobile_net,
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(len(label_names))])
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Now it produces outputs of the expected shape:
logit_batch = model(image_batch).numpy()

print("min logit:", logit_batch.min())
print("max logit:", logit_batch.max())
print()

print("Shape:", logit_batch.shape)
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Compile the model to describe the training procedure:
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss='sparse_categorical_crossentropy',
              metrics=["accuracy"])
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
There are 2 trainable variables - the Dense `weights` and `bias`:
len(model.trainable_variables)

model.summary()
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
You are ready to train the model. Note that for demonstration purposes you will only run 3 steps per epoch, but normally you would specify the real number of steps, as defined below, before passing it to `model.fit()`:
steps_per_epoch = tf.math.ceil(len(all_image_paths)/BATCH_SIZE).numpy()
steps_per_epoch

model.fit(ds, epochs=1, steps_per_epoch=3)
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Performance Note: This section just shows a couple of easy tricks that may help performance. For an in-depth guide see [Input Pipeline Performance](https://www.tensorflow.org/guide/performance/datasets). The simple pipeline used above reads each file individually, on each epoch. This is fine for local training on CPU, but may not be sufficient for GPU training and is totally inappropriate for any sort of distributed training. To investigate, first build a simple function to check the performance of our datasets:
import time

default_timeit_steps = 2*steps_per_epoch+1

def timeit(ds, steps=default_timeit_steps):
    overall_start = time.time()
    # Fetch a single batch to prime the pipeline (fill the shuffle buffer),
    # before starting the timer
    it = iter(ds.take(steps+1))
    next(it)

    start = time.time()
    for i, (images, labels) in enumerate(it):
        if i % 10 == 0:
            print('.', end='')
    print()
    end = time.time()

    duration = end - start
    print("{} batches: {} s".format(steps, duration))
    print("{:0.5f} Images/s".format(BATCH_SIZE*steps/duration))
    print("Total time: {}s".format(end - overall_start))
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
The performance of the current dataset is:
ds = image_label_ds.apply(
    tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
ds

timeit(ds)
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Cache Use `tf.data.Dataset.cache` to easily cache calculations across epochs. This is very efficient, especially when the data fits in memory. Here the images are cached after being pre-processed (decoded and resized):
ds = image_label_ds.cache()
ds = ds.apply(
    tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
ds

timeit(ds)
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
One disadvantage to using an in memory cache is that the cache must be rebuilt on each run, giving the same startup delay each time the dataset is started:
timeit(ds)
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
If the data doesn't fit in memory, use a cache file:
ds = image_label_ds.cache(filename='./cache.tf-data')
ds = ds.apply(
    tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(1)
ds

timeit(ds)
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
The cache file also has the advantage that it can be used to quickly restart the dataset without rebuilding the cache. Note how much faster it is the second time:
timeit(ds)
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
TFRecord File Raw image data TFRecord files are a simple format to store a sequence of binary blobs. By packing multiple examples into the same file, TensorFlow is able to read multiple examples at once, which is especially important for performance when using a remote storage service such as GCS. First, build a TFRecord file from the raw image data:
image_ds = tf.data.Dataset.from_tensor_slices(all_image_paths).map(tf.io.read_file)

tfrec = tf.data.experimental.TFRecordWriter('images.tfrec')
tfrec.write(image_ds)
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Next, build a dataset that reads from the TFRecord file and decodes/reformats the images using the `preprocess_image` function you defined earlier:
image_ds = tf.data.TFRecordDataset('images.tfrec').map(preprocess_image)
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Zip that dataset with the labels dataset you defined earlier to get the expected `(image,label)` pairs:
ds = tf.data.Dataset.zip((image_ds, label_ds))
ds = ds.apply(
    tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(AUTOTUNE)
ds

timeit(ds)
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
This is slower than the `cache` version because you have not cached the preprocessing. Serialized Tensors To save some preprocessing to the TFRecord file, first make a dataset of the processed images, as before:
paths_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
image_ds = paths_ds.map(load_and_preprocess_image)
image_ds
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Now instead of a dataset of `.jpeg` strings, you have a dataset of tensors.To serialize this to a TFRecord file you first convert the dataset of tensors to a dataset of strings:
ds = image_ds.map(tf.io.serialize_tensor)
ds

tfrec = tf.data.experimental.TFRecordWriter('images.tfrec')
tfrec.write(ds)
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
With the preprocessing cached, data can be loaded from the TFRecord file quite efficiently - just remember to de-serialize the tensor before using it:
ds = tf.data.TFRecordDataset('images.tfrec')

def parse(x):
    result = tf.io.parse_tensor(x, out_type=tf.float32)
    result = tf.reshape(result, [192, 192, 3])
    return result

ds = ds.map(parse, num_parallel_calls=AUTOTUNE)
ds
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Now, add the labels and apply the same standard operations, as before:
ds = tf.data.Dataset.zip((ds, label_ds))
ds = ds.apply(
    tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(AUTOTUNE)
ds

timeit(ds)
_____no_output_____
Apache-2.0
site/en/r2/tutorials/load_data/images.ipynb
Terahezi/docs
Loss Functions

> Custom fastai loss functions
F.binary_cross_entropy_with_logits(torch.randn(4,5), torch.randint(0, 2, (4,5)).float(), reduction='none')

funcs_kwargs

# export
@log_args
class BaseLoss():
    "Same as `loss_cls`, but flattens input and target."
    activation=decodes=noops
    def __init__(self, loss_cls, *args, axis=-1, flatten=True, floatify=False, is_2d=True, **kwargs):
        store_attr("axis,flatten,floatify,is_2d")
        self.func = loss_cls(*args,**kwargs)
        functools.update_wrapper(self, self.func)

    def __repr__(self): return f"FlattenedLoss of {self.func}"
    @property
    def reduction(self): return self.func.reduction
    @reduction.setter
    def reduction(self, v): self.func.reduction = v

    def __call__(self, inp, targ, **kwargs):
        inp  = inp .transpose(self.axis,-1).contiguous()
        targ = targ.transpose(self.axis,-1).contiguous()
        if self.floatify and targ.dtype!=torch.float16: targ = targ.float()
        if targ.dtype in [torch.int8, torch.int16, torch.int32]: targ = targ.long()
        if self.flatten: inp = inp.view(-1,inp.shape[-1]) if self.is_2d else inp.view(-1)
        return self.func.__call__(inp, targ.view(-1) if self.flatten else targ, **kwargs)
_____no_output_____
Apache-2.0
nbs/01a_losses.ipynb
aaminggo/fastai
Wrapping a general loss function inside of `BaseLoss` provides extra functionalities to your loss functions:

- flattens the tensors before trying to take the losses since it's more convenient (with a potential transpose to put `axis` at the end)
- a potential `activation` method that tells the library if there is an activation fused in the loss (useful for inference and methods such as `Learner.get_preds` or `Learner.predict`)
- a potential `decodes` method that is used on predictions in inference (for instance, an argmax in classification)

The `args` and `kwargs` will be passed to `loss_cls` during the initialization to instantiate a loss function. `axis` is put at the end for losses like softmax that are often performed on the last axis. If `floatify=True`, the `targs` will be converted to floats (useful for losses that only accept float targets like `BCEWithLogitsLoss`), and `is_2d` determines if we flatten while keeping the first dimension (batch size) or completely flatten the input. We want the first for losses like Cross Entropy, and the second for pretty much anything else.
# export
@log_args
@delegates()
class CrossEntropyLossFlat(BaseLoss):
    "Same as `nn.CrossEntropyLoss`, but flattens input and target."
    y_int = True
    @use_kwargs_dict(keep=True, weight=None, ignore_index=-100, reduction='mean')
    def __init__(self, *args, axis=-1, **kwargs): super().__init__(nn.CrossEntropyLoss, *args, axis=axis, **kwargs)
    def decodes(self, x):    return x.argmax(dim=self.axis)
    def activation(self, x): return F.softmax(x, dim=self.axis)

tst = CrossEntropyLossFlat()
output = torch.randn(32, 5, 10)
target = torch.randint(0, 10, (32,5))
#nn.CrossEntropy would fail with those two tensors, but not our flattened version.
_ = tst(output, target)
test_fail(lambda x: nn.CrossEntropyLoss()(output,target))

#Associated activation is softmax
test_eq(tst.activation(output), F.softmax(output, dim=-1))
#This loss function has a decodes which is argmax
test_eq(tst.decodes(output), output.argmax(dim=-1))

#In a segmentation task, we want to take the softmax over the channel dimension
tst = CrossEntropyLossFlat(axis=1)
output = torch.randn(32, 5, 128, 128)
target = torch.randint(0, 5, (32, 128, 128))
_ = tst(output, target)
test_eq(tst.activation(output), F.softmax(output, dim=1))
test_eq(tst.decodes(output), output.argmax(dim=1))

# export
@log_args
@delegates()
class BCEWithLogitsLossFlat(BaseLoss):
    "Same as `nn.BCEWithLogitsLoss`, but flattens input and target."
    @use_kwargs_dict(keep=True, weight=None, reduction='mean', pos_weight=None)
    def __init__(self, *args, axis=-1, floatify=True, thresh=0.5, **kwargs):
        super().__init__(nn.BCEWithLogitsLoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)
        self.thresh = thresh

    def decodes(self, x):    return x>self.thresh
    def activation(self, x): return torch.sigmoid(x)

tst = BCEWithLogitsLossFlat()
output = torch.randn(32, 5, 10)
target = torch.randn(32, 5, 10)
#nn.BCEWithLogitsLoss would fail with those two tensors, but not our flattened version.
_ = tst(output, target)
test_fail(lambda x: nn.BCEWithLogitsLoss()(output,target))
output = torch.randn(32, 5)
target = torch.randint(0,2,(32, 5))
#nn.BCEWithLogitsLoss would fail with int targets but not our flattened version.
_ = tst(output, target)
test_fail(lambda x: nn.BCEWithLogitsLoss()(output,target))

#Associated activation is sigmoid
test_eq(tst.activation(output), torch.sigmoid(output))

# export
@log_args(to_return=True)
@use_kwargs_dict(weight=None, reduction='mean')
def BCELossFlat(*args, axis=-1, floatify=True, **kwargs):
    "Same as `nn.BCELoss`, but flattens input and target."
    return BaseLoss(nn.BCELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)

tst = BCELossFlat()
output = torch.sigmoid(torch.randn(32, 5, 10))
target = torch.randint(0,2,(32, 5, 10))
_ = tst(output, target)
test_fail(lambda x: nn.BCELoss()(output,target))

# export
@log_args(to_return=True)
@use_kwargs_dict(reduction='mean')
def MSELossFlat(*args, axis=-1, floatify=True, **kwargs):
    "Same as `nn.MSELoss`, but flattens input and target."
    return BaseLoss(nn.MSELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)

tst = MSELossFlat()
output = torch.sigmoid(torch.randn(32, 5, 10))
target = torch.randint(0,2,(32, 5, 10))
_ = tst(output, target)
test_fail(lambda x: nn.MSELoss()(output,target))

#hide
#cuda
#Test losses work in half precision
output = torch.sigmoid(torch.randn(32, 5, 10)).half().cuda()
target = torch.randint(0,2,(32, 5, 10)).half().cuda()
for tst in [BCELossFlat(), MSELossFlat()]: _ = tst(output, target)

# export
@log_args(to_return=True)
@use_kwargs_dict(reduction='mean')
def L1LossFlat(*args, axis=-1, floatify=True, **kwargs):
    "Same as `nn.L1Loss`, but flattens input and target."
    return BaseLoss(nn.L1Loss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)

#export
@log_args
class LabelSmoothingCrossEntropy(Module):
    y_int = True
    def __init__(self, eps:float=0.1, reduction='mean'): self.eps,self.reduction = eps,reduction

    def forward(self, output, target):
        c = output.size()[-1]
        log_preds = F.log_softmax(output, dim=-1)
        if self.reduction=='sum': loss = -log_preds.sum()
        else:
            loss = -log_preds.sum(dim=-1) #We divide by that size at the return line so sum and not mean
            if self.reduction=='mean':  loss = loss.mean()
        return loss*self.eps/c + (1-self.eps) * F.nll_loss(log_preds, target.long(), reduction=self.reduction)

    def activation(self, out): return F.softmax(out, dim=-1)
    def decodes(self, out):    return out.argmax(dim=-1)
_____no_output_____
Apache-2.0
nbs/01a_losses.ipynb
aaminggo/fastai
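To make the `floatify` and `is_2d` flags described earlier more concrete, here is a minimal sketch (not from the notebook) of wrapping a made-up loss in `BaseLoss`; `_ToyLoss` is purely illustrative and everything else comes from the definitions above.

```python
# Hypothetical custom loss wrapped with BaseLoss.
class _ToyLoss(nn.Module):
    def forward(self, inp, targ): return ((inp - targ)**2).mean()

toy_flat = BaseLoss(_ToyLoss, is_2d=False, floatify=True)
out  = torch.randn(8, 3, 4)
targ = torch.randint(0, 2, (8, 3, 4))   # integer targets are floatified before the loss sees them
_ = toy_flat(out, targ)                 # both tensors are flattened to 1D because is_2d=False
```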
On top of the formula we define:

- a `reduction` attribute, that will be used when we call `Learner.get_preds`
- an `activation` function that represents the activation fused in the loss (since we use cross entropy behind the scenes). It will be applied to the output of the model when calling `Learner.get_preds` or `Learner.predict`
- a `decodes` function that converts the output of the model to a format similar to the target (here indices). This is used in `Learner.predict` and `Learner.show_results` to decode the predictions
#export
@log_args
@delegates()
class LabelSmoothingCrossEntropyFlat(BaseLoss):
    "Same as `LabelSmoothingCrossEntropy`, but flattens input and target."
    y_int = True
    @use_kwargs_dict(keep=True, eps=0.1, reduction='mean')
    def __init__(self, *args, axis=-1, **kwargs): super().__init__(LabelSmoothingCrossEntropy, *args, axis=axis, **kwargs)
    def activation(self, out): return F.softmax(out, dim=-1)
    def decodes(self, out):    return out.argmax(dim=-1)
_____no_output_____
Apache-2.0
nbs/01a_losses.ipynb
aaminggo/fastai
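A short usage sketch (added for illustration, not from the notebook): the flattened label-smoothing loss accepts the same `(output, target)` shapes as `CrossEntropyLossFlat` above.

```python
# Illustrative check of LabelSmoothingCrossEntropyFlat; shapes are arbitrary.
tst = LabelSmoothingCrossEntropyFlat(eps=0.1)
output = torch.randn(16, 10)
target = torch.randint(0, 10, (16,))
_ = tst(output, target)
test_eq(tst.decodes(output), output.argmax(dim=-1))
```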
Export -
#hide
from nbdev.export import *
notebook2script()
Converted 00_torch_core.ipynb. Converted 01_layers.ipynb. Converted 02_data.load.ipynb. Converted 03_data.core.ipynb. Converted 04_data.external.ipynb. Converted 05_data.transforms.ipynb. Converted 06_data.block.ipynb. Converted 07_vision.core.ipynb. Converted 08_vision.data.ipynb. Converted 09_vision.augment.ipynb. Converted 09b_vision.utils.ipynb. Converted 09c_vision.widgets.ipynb. Converted 10_tutorial.pets.ipynb. Converted 11_vision.models.xresnet.ipynb. Converted 12_optimizer.ipynb. Converted 13_callback.core.ipynb. Converted 13a_learner.ipynb. Converted 13b_metrics.ipynb. Converted 14_callback.schedule.ipynb. Converted 14a_callback.data.ipynb. Converted 15_callback.hook.ipynb. Converted 15a_vision.models.unet.ipynb. Converted 16_callback.progress.ipynb. Converted 17_callback.tracker.ipynb. Converted 18_callback.fp16.ipynb. Converted 18a_callback.training.ipynb. Converted 19_callback.mixup.ipynb. Converted 20_interpret.ipynb. Converted 20a_distributed.ipynb. Converted 21_vision.learner.ipynb. Converted 22_tutorial.imagenette.ipynb. Converted 23_tutorial.vision.ipynb. Converted 24_tutorial.siamese.ipynb. Converted 24_vision.gan.ipynb. Converted 30_text.core.ipynb. Converted 31_text.data.ipynb. Converted 32_text.models.awdlstm.ipynb. Converted 33_text.models.core.ipynb. Converted 34_callback.rnn.ipynb. Converted 35_tutorial.wikitext.ipynb. Converted 36_text.models.qrnn.ipynb. Converted 37_text.learner.ipynb. Converted 38_tutorial.text.ipynb. Converted 40_tabular.core.ipynb. Converted 41_tabular.data.ipynb. Converted 42_tabular.model.ipynb. Converted 43_tabular.learner.ipynb. Converted 44_tutorial.tabular.ipynb. Converted 45_collab.ipynb. Converted 46_tutorial.collab.ipynb. Converted 50_tutorial.datablock.ipynb. Converted 60_medical.imaging.ipynb. Converted 61_tutorial.medical_imaging.ipynb. Converted 65_medical.text.ipynb. Converted 70_callback.wandb.ipynb. Converted 71_callback.tensorboard.ipynb. Converted 72_callback.neptune.ipynb. Converted 73_callback.captum.ipynb. Converted 74_callback.cutmix.ipynb. Converted 97_test_utils.ipynb. Converted 99_pytorch_doc.ipynb. Converted index.ipynb. Converted tutorial.ipynb.
Apache-2.0
nbs/01a_losses.ipynb
aaminggo/fastai
Trusted Notebook" width="500 px" align="left"> Qiskit Tutorials***Welcome Qiskitters.The easiest way to get started is to use [the Binder image](https://mybinder.org/v2/gh/qiskit/qiskit-tutorials/master?filepath=index.ipynb), which lets you use the notebooks via the web. This means that you don't need to download or install anything, but is also means that you should not insert any private information into the notebooks (such as your API key). We recommend that after you are done using mybinder that you regenerate your token. The tutorials can be downloaded by clicking [here](https://github.com/Qiskit/qiskit-tutorials/archive/master.zip) and to set them up follow the installation instructions [here](https://github.com/Qiskit/qiskit-tutorial/blob/master/INSTALL.md).*** ContentsWe have organized the tutorials into two sections: 1. QiskitThese tutorials aim to explain how to use Qiskit. We assume you have installed Qiskit if not please look at [qiskit.org](http://www.qiskit.org) or the install [documentation](https://github.com/qiskit/qiskit-tutorial/blob/master/INSTALL.md). We've collected a core reference set of notebooks in this section outlining the features of Qiskit. We will be keeping them up to date with the latest Qiskit version, currently 0.7. The focus of this section will be how to use Qiskit and not so much on teaching you about quantum computing. For those interested in learning about quantum computing we recommend the awesome notebooks in the community section.Qiskit is made up of four elements: Terra, Aer, Ignis, and Aqua with each element having its own goal and together they make the full Qiskit framework. 1.1 Getting started with QiskitA central goal of Qiskit is to build a software stack that makes it easy for anyone to use quantum computers. To get developers and researchers going we have a set of tutorials on the basics. * [Getting started with Qiskit](qiskit/basics/getting_started_with_qiskit.ipynb) - how to use Qiskit * [The IBM Q provider](qiskit/basics/the_ibmq_provider.ipynb) - working with the IBM Q devices * [Plotting data in Qiskit](qiskit/basics/plotting_data_in_qiskit.ipynb) - illustrates the different ways of plotting data in Qiskit 1.2 Qiskit TerraTerra, the ‘earth’ element, is the foundation on which the rest of the software lies. Terra provides a bedrock for composing quantum programs at the level of circuits and pulses, to optimize them for the constraints of a particular device, and to manage the execution of batches of experiments on remote-access devices. Terra defines the interfaces for a desirable end-user experience, as well as the efficient handling of layers of optimization, pulse scheduling and backend communication. 
* [Quantum circuits](qiskit/terra/quantum_circuits.ipynb) - gives a summary of the `QuantumCircuit` object * [Visualizing a quantum circuit](qiskit/terra/visualizing_a_quantum_circuit.ipynb) - details on drawing your quantum circuits * [Summary of quantum operations](qiskit/terra/summary_of_quantum_operations.ipynb) - list of quantum operations (gates, reset, measurements) in Qiskit Terra * [Monitoring jobs and backends](qiskit/terra/backend_monitoring_tools.ipynb) - tools for monitoring jobs and backends * [Parallel tools](qiskit/terra/terra_parallel_tools.ipynb) - executing tasks in parallel using `parallel_map` and tracking progress * [Creating a new provider](qiskit/terra/creating_a_provider.ipynb) - a guide to integration of a new provider with Qiskit structures and interfaces 1.3 Qiskit Interacitve Plotting and Jupyter ToolsTo improve the Qiskit user experience we have made many of the visualizations interactive and developed some very cool new job monitoring tools in Jupyter. * [Jupyter tools for Monitoring jobs and backends](qiskit/jupyter/jupyter_backend_tools.ipynb) - Jupyter tools for monitoring jobs and backends 1.4 Qiskit AerAer, the ‘air’ element, permeates all Qiskit elements. To really speed up development of quantum computers we need better simulators with the ability to model realistic noise processes that occur during computation on actual devices. Aer provides a high-performance simulator framework for studying quantum computing algorithms and applications in the noisy intermediate scale quantum regime. * [Aer provider](qiskit/aer/aer_provider.ipynb) - gives a summary of the Qiskit Aer provider containing the Qasm, statevector, and unitary simulator * [Device noise simulation](qiskit/aer/device_noise_simulation.ipynb) - shows how to use the Qiskit Aer noise module to automatically generate a basic noise model for simulating hardware backends 1.5 Qiskit IgnisIgnis, the ‘fire’ element, is dedicated to fighting noise and errors and to forging a new path. This includes better characterization of errors, improving gates, and computing in the presence of noise. Ignis is meant for those who want to design quantum error correction codes, or who wish to study ways to characterize errors through methods such as tomography, or even to find a better way for using gates by exploring dynamical decoupling and optimal control. While we have already released parts of this element as part of libraries in Terra, an official stand-alone release will come soon. For now we have some tutorials for you to explore. * [Relaxation and decoherence](qiskit/ignis/relaxation_and_decoherence.ipynb) - how to measure coherence times on the real quantum hardware * [Quantum state tomography](qiskit/ignis/state_tomography.ipynb) - how to identify a quantum state using state tomography, in which the state is prepared repeatedly and measured in different bases * [Quantum process tomography](qiskit/ignis/process_tomography.ipynb) - using quantum process tomography to reconstruct the behavior of a quantum process and measure its fidelity, i.e., how closely it matches the ideal version 1.6 Qiskit AquaAqua, the ‘water’ element, is the element of life. To make quantum computing live up to its expectations, we need to find real-world applications. Aqua is where algorithms for NISQ computers are built. These algorithms can be used to build applications for quantum computing. 
Aqua is accessible to domain experts in chemistry, optimization, AI or finance, who want to explore the benefits of using quantum computers as accelerators for specific computational tasks, without needing to worry about how to translate the problem into the language of quantum machines. * [Chemistry](qiskit/aqua/chemistry/index.ipynb) - using variational quantum eigensolver to experiment with molecular ground-state energy on a quantum computer * [Optimization](qiskit/aqua/optimization/index.ipynb) - using variational quantum eigensolver to experiment with optimization problems (maxcut and traveling salesman problem) on a quantum computer * [Artificial Intelligence](qiskit/aqua/artificial_intelligence/index.ipynb) - using quantum-enhanced support vector machine to experiment with classification problems on a quantum computer * [Finance](qiskit/aqua/finance/index.ipynb) - using variational quantum eigensolver to optimize portfolio on a quantum computer 2. Community NotebooksTeaching quantum and qiskit has so many different paths of learning. We love our community and we love the contributions so keep them coming. Because Qiskit is changing so much we can't keep this updated (we will try our best) but there are some great notebooks in here. 2.1 [Hello, Quantum World with Qiskit](community/hello_world/) Learn from the community how to write your first quantum program. 2.2 [Quantum Games with Qiskit](community/games/)Learn quantum computing by having fun. How is there a better way! 2.3 [Quantum Information Science with Qiskit Terra](community/terra/index.ipynb)Learn about and how to program quantum circuits using Qiskit Terra. 2.4 [Textbook Quantum Algorithms with Qiskit Terra](community/algorithms/index.ipynb)Learn about textbook quantum algorithms, like Deutsch-Jozsa, Grover, and Shor using Qiskit Terra. 2.5 [Developing Quantum Applications with Qiskit Aqua](community/aqua/index.ipynb)Learn how to develop and the fundamentals of quantum applications using Qiskit Aqua 2.6 AwardsLearn from the great contributions to the [IBM Q Awards](https://qe-awards.mybluemix.net/)* [Teach Me Qiskit 2018](community/awards/teach_me_qiskit_2018/index.ipynb)* [Teach Me Quantum 2018](community/awards/teach_me_quantum_2018/index.ipynb)
from IPython.display import display, Markdown with open('index.md', 'r') as readme: content = readme.read(); display(Markdown(content))
_____no_output_____
Apache-2.0
index.ipynb
Chibikuri/qiskit-tutorials
The Central Limit Theorem Very few of the data histograms that we have seen in this course have been bell shaped. When we have come across a bell shaped distribution, it has almost invariably been an empirical histogram of a statistic based on a random sample. **The Central Limit Theorem says that the probability distribution of the sum or average of a large random sample drawn with replacement will be roughly normal, *regardless of the distribution of the population from which the sample is drawn*.**As we noted when we were studying Chebychev's bounds, results that can be applied to random samples *regardless of the distribution of the population* are very powerful, because in data science we rarely know the distribution of the population.The Central Limit Theorem makes it possible to make inferences with very little knowledge about the population, provided we have a large random sample. That is why it is central to the field of statistical inference. Proportion of Purple Flowers Recall Mendel's probability model for the colors of the flowers of a species of pea plant. The model says that the flower colors of the plants are like draws made at random with replacement from {Purple, Purple, Purple, White}.In a large sample of plants, about what proportion will have purple flowers? We would expect the answer to be about 0.75, the proportion purple in the model. And, because proportions are means, the Central Limit Theorem says that the distribution of the sample proportion of purple plants is roughly normal.We can confirm this by simulation. Let's simulate the proportion of purple-flowered plants in a sample of 200 plants.
colors = make_array('Purple', 'Purple', 'Purple', 'White')
model = Table().with_column('Color', colors)
model

props = make_array()
num_plants = 200
repetitions = 1000

for i in np.arange(repetitions):
    sample = model.sample(num_plants)
    new_prop = np.count_nonzero(sample.column('Color') == 'Purple')/num_plants
    props = np.append(props, new_prop)

props[:5]

opts = {
    'title': 'Distribution of sample proportions',
    'xlabel': 'Sample Proportion',
    'ylabel': 'Percent per unit',
    'xlim': (0.64, 0.84),
    'ylim': (0, 25),
    'bins': 20,
}
nbi.hist(props, options=opts)
_____no_output_____
BSD-3-Clause
packages/nbinteract-core/example-notebooks/examples_central_limit_theorem.ipynb
samlaf/nbinteract
There's that normal curve again, as predicted by the Central Limit Theorem, centered at around 0.75 just as you would expect.How would this distribution change if we increased the sample size? We can copy our sampling code into a function and then use interaction to see how the distribution changes as the sample size increases.We will keep the number of `repetitions` the same as before so that the two columns have the same length.
def empirical_props(num_plants):
    props = make_array()
    for i in np.arange(repetitions):
        sample = model.sample(num_plants)
        new_prop = np.count_nonzero(sample.column('Color') == 'Purple')/num_plants
        props = np.append(props, new_prop)
    return props

nbi.hist(empirical_props, options=opts,
         num_plants=widgets.ToggleButtons(options=[100, 200, 400, 800]))
_____no_output_____
BSD-3-Clause
packages/nbinteract-core/example-notebooks/examples_central_limit_theorem.ipynb
samlaf/nbinteract
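The "regardless of the population distribution" part of the Central Limit Theorem can also be seen with a deliberately skewed population. Here is a plain NumPy/matplotlib sketch (added, not part of the original notebook, and not using the nbinteract helpers above): sample means drawn from an exponential population still come out roughly bell shaped.

```python
# Illustrative CLT check on a skewed (exponential) population.
import numpy as np
import matplotlib.pyplot as plt

population = np.random.exponential(scale=2.0, size=100_000)   # heavily skewed population
sample_means = np.array([
    np.mean(np.random.choice(population, size=200, replace=True))
    for _ in range(1000)
])

plt.hist(sample_means, bins=30)
plt.title('Distribution of sample means (exponential population)')
plt.show()
```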
Plotting aggregate variables Pyam offers many great visualisation and analysis tools. In this notebook we highlight the `aggregate` and `stack_plot` methods of an `IamDataFrame`.
import numpy as np
import pandas as pd

import pyam

%matplotlib inline
import matplotlib.pyplot as plt
_____no_output_____
Apache-2.0
doc/source/tutorials/aggregating_variables_and_plotting_with_negative_values.ipynb
peterkolp/pyam
Here we provide some sample data for this tutorial. This data is for a single model-scenario-region combination but provides multiple subsectors of CO$_2$ emissions. The emissions in the subsectors are both positive and negative and so provide a good test of the flexibility of our aggregation and plotting routines.
df = pyam.IamDataFrame(pd.DataFrame([
    ['IMG', 'a_scen', 'World', 'Emissions|CO2|Energy|Oil', 'Mt CO2/yr', 2, 3.2, 2.0, 1.8],
    ['IMG', 'a_scen', 'World', 'Emissions|CO2|Energy|Gas', 'Mt CO2/yr', 1.3, 1.6, 1.0, 0.7],
    ['IMG', 'a_scen', 'World', 'Emissions|CO2|Energy|BECCS', 'Mt CO2/yr', 0.0, 0.4, -0.4, 0.3],
    ['IMG', 'a_scen', 'World', 'Emissions|CO2|Cars', 'Mt CO2/yr', 1.6, 3.8, 3.0, 2.5],
    ['IMG', 'a_scen', 'World', 'Emissions|CO2|Tar', 'Mt CO2/yr', 0.3, 0.35, 0.35, 0.33],
    ['IMG', 'a_scen', 'World', 'Emissions|CO2|Agg', 'Mt CO2/yr', 0.5, -0.1, -0.5, -0.7],
    ['IMG', 'a_scen', 'World', 'Emissions|CO2|LUC', 'Mt CO2/yr', -0.3, -0.6, -1.2, -1.0]
    ],
    columns=['model', 'scenario', 'region', 'variable', 'unit', 2005, 2010, 2015, 2020],
))
df.head()
_____no_output_____
Apache-2.0
doc/source/tutorials/aggregating_variables_and_plotting_with_negative_values.ipynb
peterkolp/pyam
Pyam's `stack_plot` method plots the stacks in the clearest way possible, even when some emissions are negative. The optional `total` keyword argument also allows the user to include a total line on their plot.
df.stack_plot(); df.stack_plot(total=True);
_____no_output_____
Apache-2.0
doc/source/tutorials/aggregating_variables_and_plotting_with_negative_values.ipynb
peterkolp/pyam
The appearance of the stackplot can be controlled simply via ``kwargs``. The appearance of the total line is controlled by passing a dictionary to the `total` keyword argument.
df.stack_plot(alpha=0.5, total={"color": "grey", "ls": "--", "lw": 2.0});
_____no_output_____
Apache-2.0
doc/source/tutorials/aggregating_variables_and_plotting_with_negative_values.ipynb
peterkolp/pyam
If the user wishes, they can firstly filter their data before plotting.
df.filter(variable="Emissions|CO2|Energy*").stack_plot(total=True);
_____no_output_____
Apache-2.0
doc/source/tutorials/aggregating_variables_and_plotting_with_negative_values.ipynb
peterkolp/pyam
Using `aggregate`, it is possible to create arbitrary sums of sub-sectors before plotting.
pdf = df.copy()

afoluluc_vars = ["Emissions|CO2|LUC", "Emissions|CO2|Agg"]
fossil_vars = list(set(pdf.variables()) - set(afoluluc_vars))

pdf.aggregate(
    "Emissions|CO2|AFOLULUC",
    components=afoluluc_vars,
    append=True
)
pdf.aggregate(
    "Emissions|CO2|Fossil",
    components=fossil_vars,
    append=True
)

pdf.filter(variable=[
    "Emissions|CO2|AFOLULUC",
    "Emissions|CO2|Fossil"
]).stack_plot(total=True);
_____no_output_____
Apache-2.0
doc/source/tutorials/aggregating_variables_and_plotting_with_negative_values.ipynb
peterkolp/pyam
Author: Saeed Amen (@thalesians) - Managing Director & Co-founder of [the Thalesians](http://www.thalesians.com)

Introduction

With the UK general election in early May 2015, we thought it would be a fun exercise to demonstrate how you can investigate market price action over historical elections. We shall be using Python, together with Plotly for plotting. Plotly is a free web-based platform for making graphs. You can keep graphs private, make them public, and run Plotly on your own servers with [Plotly Enterprise](https://plot.ly/product/enterprise/). You can find more details [here](https://plot.ly/python/getting-started/).

Getting market data with Bloomberg

To get market data, we shall be using Bloomberg. As a starting point, we have used bbg_py from [Brian Smith's TIA project](https://github.com/bpsmith/tia/tree/master/tia/bbg), which allows you to access Bloomberg via COM (an older method), modifying it to make it compatible with Python 3.4. Whilst we shall only use it to access historical daily data, there are also functions which enable us to download intraday data. This method is only compatible with 32-bit versions of Python and assumes you are running the code on a Bloomberg terminal (it won't work without a valid Bloomberg licence).

In my opinion a better way to access Bloomberg via Python is via the official Bloomberg open-source Python API; however, at the time of writing the official version is not yet compatible with Python 3.4. Fil Mackay has created a Python 3.4 compatible version of this [here](https://github.com/filmackay/blpapi-py), which I have used successfully. Whilst it takes slightly more time to configure (and compile using Windows SDK 7.1), it has the benefit of being compatible with 64-bit Python, which I have found invaluable in my analysis (have a read of [this](http://ta.speot.is/2012/04/09/visual-studio-2010-sp1-windows-sdk-7-1-install-order/) in case of failed installations of Windows SDK 7.1).

Quandl can be used as an alternative data source if you don't have access to a Bloomberg terminal; I have also included it in the code.

Breaking down the steps in Python

Our project will consist of several parts:

- bbg_com - low-level interaction with the BBG COM object (adapted for Python 3.4), which we are simply calling
- datadownloader - wrapper for BBG COM, Quandl and CSV access to data
- eventplot - reusable functions for interacting with Plotly and creating event studies
- ukelection - kicks off the whole script process

Downloading the market data

As with any sort of financial market analysis, the first step is obtaining market data. We create the DataDownloader class, which acts as a wrapper for Bloomberg, Quandl and CSV market data. We write a single function "download_time_series" for this. We could of course extend this for other data sources such as Yahoo Finance. Our output will be Pandas based dataframes. We want to make this code generic, so the tickers are not hard coded.
# for time series manipulation
import pandas

class DataDownloader:
    def download_time_series(self, vendor_ticker, pretty_ticker, start_date, source, csv_file = None):

        if source == 'Quandl':
            import Quandl
            # Quandl requires API key for large number of daily downloads
            # https://www.quandl.com/help/api
            spot = Quandl.get(vendor_ticker)    # Bank of England's database on Quandl
            spot = pandas.DataFrame(data=spot['Value'], index=spot.index)
            spot.columns = [pretty_ticker]

        elif source == 'Bloomberg':
            from bbg_com import HistoricalDataRequest
            req = HistoricalDataRequest([vendor_ticker], ['PX_LAST'], start = start_date)
            req.execute()

            spot = req.response_as_single()
            spot.columns = [pretty_ticker]

        elif source == 'CSV':
            dateparse = lambda x: pandas.datetime.strptime(x, '%Y-%m-%d')

            # in case you want to use a source other than Bloomberg/Quandl
            spot = pandas.read_csv(csv_file, index_col=0, parse_dates=0, date_parser=dateparse)

        return spot
_____no_output_____
CC-BY-3.0
_posts/ipython-notebooks/ukelectionbbg.ipynb
jacolind/documentation
Generic functions for event study and Plotly plotting We now focus our efforts on the EventPlot class. Here we shall do our basic analysis. We shall also create functions for creating Plotly traces and layouts that we shall reuse a number of times. The analysis we shall conduct is fairly simple. Given a time series of spot prices and a number of event dates, we shall create an event study around these dates for that asset. We also include the "Mean" move over all the various dates. (A small toy illustration of the index rebasing used in the code follows below.)
# for dates
import datetime

# time series manipulation
import pandas

# for plotting data
import plotly
from plotly.graph_objs import *

class EventPlot:
    def event_study(self, spot, dates, pre, post, mean_label = 'Mean'):
        # event_study - calculates the asset price moves over windows around event days
        #
        # spot = price of asset to study
        # dates = event days to anchor our event study
        # pre = days before the event day to start our study
        # post = days after the event day to end our study
        #
        data_frame = pandas.DataFrame()

        # for each date grab spot data the days before and after
        for i in range(0, len(dates)):
            mid_index = spot.index.searchsorted(dates[i])
            start_index = mid_index + pre
            finish_index = mid_index + post + 1

            x = (spot.ix[start_index:finish_index])[spot.columns.values[0]]

            data_frame[dates[i]] = x.values

        data_frame.index = range(pre, post + 1)

        data_frame = data_frame / data_frame.shift(1) - 1   # returns

        # add the mean on to the end
        data_frame[mean_label] = data_frame.mean(axis=1)

        data_frame = 100.0 * (1.0 + data_frame).cumprod()   # index
        data_frame.ix[pre,:] = 100                          # re-anchor the start of each window at 100

        return data_frame
_____no_output_____
CC-BY-3.0
_posts/ipython-notebooks/ukelectionbbg.ipynb
jacolind/documentation
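To make the rebasing step at the end of event_study concrete, here is a toy sketch (purely illustrative, using made-up daily returns) of how a return series is compounded into an index based at 100 on the first day of the window; it mirrors the cumprod and .ix usage above.

import pandas

# made-up daily returns for a toy 5 day window starting at day -1; the first value
# is NaN, mirroring the NaN produced by shift(1) in event_study above
returns = pandas.Series([float('nan'), 0.010, -0.005, 0.020, 0.000], index=range(-1, 4))

index_level = 100.0 * (1.0 + returns).cumprod()   # compound the returns into an index
index_level.ix[-1] = 100                          # re-anchor the start of the window at 100

print(index_level)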
We write a function to convert dates represented in a string format to Python format.
def parse_dates(self, str_dates): # parse_dates - parses string dates into Python format # # str_dates = dates to be parsed in the format of day/month/year # dates = [] for d in str_dates: dates.append(datetime.datetime.strptime(d, '%d/%m/%Y')) return dates EventPlot.parse_dates = parse_dates
_____no_output_____
CC-BY-3.0
_posts/ipython-notebooks/ukelectionbbg.ipynb
jacolind/documentation
Our next focus is on the Plotly functions which create a layout. This enables us to specify axes labels, the width and height of the final plot and so on. We could of course add further properties into it.
def create_layout(self, title, xaxis, yaxis, width = -1, height = -1): # create_layout - populates a layout object # title = title of the plot # xaxis = xaxis label # yaxis = yaxis label # width (optional) = width of plot # height (optional) = height of plot # layout = Layout( title = title, xaxis = plotly.graph_objs.XAxis( title = xaxis, showgrid = False ), yaxis = plotly.graph_objs.YAxis( title= yaxis, showline = False ) ) if width > 0 and height > 0: layout['width'] = width layout['height'] = height return layout EventPlot.create_layout = create_layout
_____no_output_____
CC-BY-3.0
_posts/ipython-notebooks/ukelectionbbg.ipynb
jacolind/documentation
Earlier, in the DataDownloader class, our output was Pandas based dataframes. Our convert_df_plotly function will convert each series from the Pandas dataframe into Plotly traces. Along the way, we shall add various properties such as markers with varying levels of opacity, graduated coloring of lines (which uses colorlover) and so on.
def convert_df_plotly(self, dataframe, axis_no = 1, color_def = ['default'], special_line = 'Mean', showlegend = True, addmarker = False, gradcolor = None): # convert_df_plotly - converts a Pandas data frame to Plotly format for line plots # dataframe = data frame due to be converted # axis_no = axis for plot to be drawn (default = 1) # special_line = make lines named this extra thick # color_def = color scheme to be used (default = ['default']), colour will alternate in the list # showlegend = True or False to show legend of this line on plot # addmarker = True or False to add markers # gradcolor = Create a graduated color scheme for the lines # # Also see http://nbviewer.ipython.org/gist/nipunreddevil/7734529 for converting dataframe to traces # Also see http://moderndata.plot.ly/color-scales-in-ipython-notebook/ x = dataframe.index.values traces = [] # will be used for market opacity for the markers increments = 0.95 / float(len(dataframe.columns)) if gradcolor is not None: try: import colorlover as cl color_def = cl.scales[str(len(dataframe.columns))]['seq'][gradcolor] except: print('Check colorlover installation...') i = 0 for key in dataframe: scatter = plotly.graph_objs.Scatter( x = x, y = dataframe[key].values, name = key, xaxis = 'x' + str(axis_no), yaxis = 'y' + str(axis_no), showlegend = showlegend) # only apply color/marker properties if not "default" if color_def[i % len(color_def)] != "default": if special_line in str(key): # special case for lines labelled "mean" # make line thicker scatter['mode'] = 'lines' scatter['line'] = plotly.graph_objs.Line( color = color_def[i % len(color_def)], width = 2 ) else: line_width = 1 # set properties for the markers which change opacity # for markers make lines thinner if addmarker: opacity = 0.05 + (increments * i) scatter['mode'] = 'markers+lines' scatter['marker'] = plotly.graph_objs.Marker( color=color_def[i % len(color_def)], # marker color opacity = opacity, size = 5) line_width = 0.2 else: scatter['mode'] = 'lines' scatter['line'] = plotly.graph_objs.Line( color = color_def[i % len(color_def)], width = line_width) i = i + 1 traces.append(scatter) return traces EventPlot.convert_df_plotly = convert_df_plotly
_____no_output_____
CC-BY-3.0
_posts/ipython-notebooks/ukelectionbbg.ipynb
jacolind/documentation
UK election analysis We've now created several generic functions for downloading data, doing an event study and also for helping us out with plotting via Plotly. We now start work on the ukelection.py script, for pulling it all together. As a very first step we need to provide credentials for Plotly (you can get your own Plotly key and username [here](https://plot.ly/python/getting-started/)).
# for time series/maths import pandas # for plotting data import plotly import plotly.plotly as py from plotly.graph_objs import * def ukelection(): # Learn about API authentication here: https://plot.ly/python/getting-started # Find your api_key here: https://plot.ly/settings/api plotly_username = "thalesians" plotly_api_key = "XXXXXXXXX" plotly.tools.set_credentials_file(username=plotly_username, api_key=plotly_api_key)
_____no_output_____
CC-BY-3.0
_posts/ipython-notebooks/ukelectionbbg.ipynb
jacolind/documentation
Let's download our market data that we need (GBP/USD spot data) using the DataDownloader class. As a default, I've opted to use Bloomberg data. You can try other currency pairs or markets (for example FTSE), to compare results for the event study. Note that obviously each data vendor will have a different ticker in their system for what could well be the same asset. With FX, care must be taken to know which close the vendor is snapping. As a default we have opted for BGN, which for GBP/USD is the NY close value.
ticker = 'GBPUSD' # will use in plot titles later (and for creating Plotly URL) ##### download market GBP/USD data from Quandl, Bloomberg or CSV file source = "Bloomberg" # source = "Quandl" # source = "CSV" csv_file = None event_plot = EventPlot() data_downloader = DataDownloader() start_date = event_plot.parse_dates(['01/01/1975']) if source == 'Quandl': vendor_ticker = "BOE/XUDLUSS" elif source == 'Bloomberg': vendor_ticker = 'GBPUSD BGN Curncy' elif source == 'CSV': vendor_ticker = 'GBPUSD' csv_file = 'D:/GBPUSD.csv' spot = data_downloader.download_time_series(vendor_ticker, ticker, start_date[0], source, csv_file = csv_file)
_____no_output_____
CC-BY-3.0
_posts/ipython-notebooks/ukelectionbbg.ipynb
jacolind/documentation
The most important part of the study is getting the historical UK election dates! We can obtain these from Wikipedia. We then convert them into Python format. We also need to make sure we filter the UK election dates to those where we have spot data available.
labour_wins = ['28/02/1974', '10/10/1974', '01/05/1997', '07/06/2001', '05/05/2005']
conservative_wins = ['03/05/1979', '09/06/1983', '11/06/1987', '09/04/1992', '06/05/2010']

# convert the election dates from strings to Python datetimes
labour_wins_d = event_plot.parse_dates(labour_wins)
conservative_wins_d = event_plot.parse_dates(conservative_wins)

# only take those elections where we have spot data available
labour_wins_d = [d for d in labour_wins_d if d > spot.index[0].to_pydatetime()]
conservative_wins_d = [d for d in conservative_wins_d if d > spot.index[0].to_pydatetime()]

spot.index.name = 'Date'
_____no_output_____
CC-BY-3.0
_posts/ipython-notebooks/ukelectionbbg.ipynb
jacolind/documentation
We then call our event study function in EventPlot on our spot data, which comprises the 20 days before and the 20 days after each UK general election. We shall plot these lines later.
# number of days before and after for our event study pre = -20 post = 20 # calculate spot path during Labour wins labour_wins_spot = event_plot.event_study(spot, labour_wins_d, pre, post, mean_label = 'Labour Mean') # calculate spot path during Conservative wins conservative_wins_spot = event_plot.event_study(spot, conservative_wins_d, pre, post, mean_label = 'Conservative Mean')
_____no_output_____
CC-BY-3.0
_posts/ipython-notebooks/ukelectionbbg.ipynb
jacolind/documentation
Define our xaxis and yaxis labels, as well as our source, which we shall later include in the title.
##### Create separate plots of price action during Labour and Conservative wins xaxis = 'Days' yaxis = 'Index' source_label = "Source: @thalesians/BBG/Wikipedia"
_____no_output_____
CC-BY-3.0
_posts/ipython-notebooks/ukelectionbbg.ipynb
jacolind/documentation
We're finally ready for our first plot! We shall plot GBP/USD moves over Labour election wins, using the default palette, and then we shall embed it into the notebook using the URL given to us by the Plotly website.
###### Plot market reaction during Labour UK election wins ###### Using default color scheme title = ticker + ' during UK gen elect - Lab wins' + '<BR>' + source_label fig = Figure(data=event_plot.convert_df_plotly(labour_wins_spot), layout=event_plot.create_layout(title, xaxis, yaxis) ) py.iplot(fig, filename='labour-wins-' + ticker)
_____no_output_____
CC-BY-3.0
_posts/ipython-notebooks/ukelectionbbg.ipynb
jacolind/documentation
The "iplot" function will send it to Plotly's server (provided we have all the dependencies installed). Alternatively, we could embed the HTML as an image, which we have taken from the Plotly website. Note this approach will yield a static image which is fetched from Plotly's servers. It also possible to write the image to disk. Later we shall show the embed function. We next plot GBP/USD over Conservative wins. In this instance, however, we have a graduated 'Blues' color scheme, given obviously that blue is the color of the Conserative party in the UK!
###### Plot market reaction during Conservative UK election wins ###### Using varying shades of blue for each line (helped by colorlover library) title = ticker + ' during UK gen elect - Con wins ' + '<BR>' + source_label # also apply graduated color scheme of blues (from light to dark) # see http://moderndata.plot.ly/color-scales-in-ipython-notebook/ for details on colorlover package # which allows you to set scales fig = Figure(data=event_plot.convert_df_plotly(conservative_wins_spot, gradcolor='Blues', addmarker=False), layout=event_plot.create_layout(title, xaxis, yaxis), ) plot_url = py.iplot(fig, filename='conservative-wins-' + ticker)
_____no_output_____
CC-BY-3.0
_posts/ipython-notebooks/ukelectionbbg.ipynb
jacolind/documentation
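As mentioned above, it is also possible to write a static copy of a chart to disk rather than only displaying it interactively. A minimal sketch, assuming the image export call available in the Plotly Python API of the time (py.image.save_as) and reusing the fig object from the previous cell; the file name is just an example.

# save a static PNG copy of the most recent figure to disk
# (py.image.save_as sends the figure to Plotly's servers to be rendered)
py.image.save_as(fig, filename='conservative-wins-' + ticker + '.png')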
Embed the chart into the document using "embed". This essentially embeds the JavaScript code necessary to make it interactive.
import plotly.tools as tls tls.embed("https://plot.ly/~thalesians/245")
_____no_output_____
CC-BY-3.0
_posts/ipython-notebooks/ukelectionbbg.ipynb
jacolind/documentation
Our final plot will consist of three subplots (from left to right): Conservative wins, Labour wins, and the average moves for both parties. We also add a grid and a grey background for each plot.
##### Plot market reaction during UK election wins by party
##### create a plot consisting of 3 subplots (from left to right)
##### 1. Conservative wins, 2. Labour wins, 3. Conservative/Labour mean move

# create a dataframe which grabs the mean from the respective Lab & Con election wins
mean_wins_spot = pandas.DataFrame()
mean_wins_spot['Labour Mean'] = labour_wins_spot['Labour Mean']
mean_wins_spot['Conservative Mean'] = conservative_wins_spot['Conservative Mean']

fig = plotly.tools.make_subplots(rows=1, cols=3)

# apply different color scheme (red = Lab, blue = Con)
# also add markers, which will have varying levels of opacity
fig['data'] += Data(
    event_plot.convert_df_plotly(conservative_wins_spot, axis_no=1, color_def=['blue'], addmarker=True) +
    event_plot.convert_df_plotly(labour_wins_spot, axis_no=2, color_def=['red'], addmarker=True) +
    event_plot.convert_df_plotly(mean_wins_spot, axis_no=3, color_def=['red', 'blue'], addmarker=True, showlegend = False)
    )

fig['layout'].update(title=ticker + ' during UK gen elects by winning party ' + '<BR>' + source_label)

# use the scheme from https://plot.ly/python/bubble-charts-tutorial/
# can use dict approach, rather than specifying each separately
axis_style = dict(
        gridcolor='#FFFFFF',  # white grid lines
        ticks='outside',      # draw ticks outside axes
        ticklen=8,            # tick length
        tickwidth=1.5         # and width
    )

# create the various axes for the three separate charts
fig['layout'].update(xaxis1=plotly.graph_objs.XAxis(axis_style, title=xaxis))
fig['layout'].update(yaxis1=plotly.graph_objs.YAxis(axis_style, title=yaxis))

fig['layout'].update(xaxis2=plotly.graph_objs.XAxis(axis_style, title=xaxis))
fig['layout'].update(yaxis2=plotly.graph_objs.YAxis(axis_style))

fig['layout'].update(xaxis3=plotly.graph_objs.XAxis(axis_style, title=xaxis))
fig['layout'].update(yaxis3=plotly.graph_objs.YAxis(axis_style))

fig['layout'].update(plot_bgcolor='#EFECEA')  # set plot background to grey

plot_url = py.iplot(fig, filename='labour-conservative-wins-'+ ticker + '-subplot')
This is the format of your plot grid: [ (1,1) x1,y1 ] [ (1,2) x2,y2 ] [ (1,3) x3,y3 ]
CC-BY-3.0
_posts/ipython-notebooks/ukelectionbbg.ipynb
jacolind/documentation
This time we use "embed", which grab the plot from Plotly's server, we did earlier (given we have already uploaded it).
import plotly.tools as tls tls.embed("https://plot.ly/~thalesians/246")
_____no_output_____
CC-BY-3.0
_posts/ipython-notebooks/ukelectionbbg.ipynb
jacolind/documentation
That's about it! I hope the code I've written proves fruitful for creating some very cool Plotly plots and also for doing some very timely analysis ahead of the UK general election! Hoping this will be the first of many blogs on using Plotly with market data. The analysis in this blog is based on a report I wrote for Thalesians, a quant finance thinktank. If you are interested in getting access to the full copy of the report (Thalesians: My kingdom for a vote - The definitive quant guide to UK general elections), feel free to e-mail me at saeed@thalesians.com or tweet me @thalesians. Want to hear more about global macro and UK election developments? If you're interested in FX and the UK general election, come to our Thalesians panel in London on April 29th 2015 at 7.30pm in Canary Wharf, which will feature Eric Burroughs (Reuters - FX Buzz Editor), Mark Cudmore (Bloomberg - First Word EM Strategist), Jordan Rochester (Nomura - FX strategist), Jeremy Wilkinson-Smith (Independent FX trader) and myself as the moderator. Tickets are available [here](http://www.meetup.com/thalesians/events/221147156/). Biography Saeed Amen is the managing director and co-founder of the Thalesians. He has a decade of experience creating and successfully running systematic trading models at Lehman Brothers, Nomura and now at the Thalesians. Independently, he runs a systematic trading model with proprietary capital. He is the author of Trading Thalesians – What the ancient world can teach us about trading today (Palgrave Macmillan). He graduated with a first class honours master's degree from Imperial College in Mathematics & Computer Science. He is also a fan of Python and has written an extensive library for financial market backtesting called PyThalesians. Follow the Thalesians on Twitter @thalesians and get my book on Amazon [here](http://www.amazon.co.uk/Trading-Thalesians-Saeed-Amen/dp/113739952X). All the code here is available to download from the [Thalesians GitHub page](https://github.com/thalesians/pythalesians).
from IPython.display import display, HTML display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />')) display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">')) ! pip install publisher --upgrade import publisher publisher.publish( 'ukelectionbbg.ipynb', 'ipython-notebooks/ukelectionbbg/', 'Plotting GBP/USD price action around UK general elections', 'Create interactive graphs with market data, IPython Notebook and Plotly', name='Plot MP Action in GBP/USD around UK General Elections')
Requirement already up-to-date: publisher in /Users/chriddyp/Repos/venvpy27/lib/python2.7/site-packages/publisher-0.4-py2.7.egg
CC-BY-3.0
_posts/ipython-notebooks/ukelectionbbg.ipynb
jacolind/documentation
Introduction to Bayesian Optimization with GPyOpt Written by Javier Gonzalez, Amazon Research Cambridge*Last updated Monday, 22 May 2017.*=====================================================================================================1. **How to use GPyOpt?**2. **The Basics of Bayesian Optimization** 1. Gaussian Processes 2. Acquisition functions 3. Applications of Bayesian Optimization 3. **1D optimization example**4. **2D optimization example**===================================================================================================== 1. How to use GPyOpt? We start by loading GPyOpt and GPy.
%pylab inline import GPy import GPyOpt from numpy.random import seed import matplotlib
Populating the interactive namespace from numpy and matplotlib warning in stationary: failed to import cython module: falling back to numpy
BSD-3-Clause
manual/GPyOpt_reference_manual.ipynb
komorihi/GPyOpt
GPyOpt is easy to use as a black-box function optimizer. To start you only need: * Your favorite function $f$ to minimize. We use $f(x)=(2x)^2$ in this toy example, whose global minimum is at $x=0$.
def myf(x): return (2*x)**2
_____no_output_____
BSD-3-Clause
manual/GPyOpt_reference_manual.ipynb
komorihi/GPyOpt
* A set of box constraints, the interval $[-1,1]$ in our case. You can define a list of dictionaries where each element defines the name, type and domain of the variables.
bounds = [{'name': 'var_1', 'type': 'continuous', 'domain': (-1,1)}]
_____no_output_____
BSD-3-Clause
manual/GPyOpt_reference_manual.ipynb
komorihi/GPyOpt
* A budget, or number of allowed evaluations of $f$.
max_iter = 15
_____no_output_____
BSD-3-Clause
manual/GPyOpt_reference_manual.ipynb
komorihi/GPyOpt
With these three pieces of information, GPyOpt has enough to find the minimum of $f$ in the selected region. GPyOpt solves the problem in two steps. First, you need to create a GPyOpt object that stores the problem ($f$ and the box constraints). You can do it as follows.
myProblem = GPyOpt.methods.BayesianOptimization(myf,bounds)
_____no_output_____
BSD-3-Clause
manual/GPyOpt_reference_manual.ipynb
komorihi/GPyOpt
Next you need to run the optimization for the given budget of iterations. This step is a bit slow because many default options are used. In the next notebooks of this manual you can learn how to change other parameters to improve the performance of the optimization.
myProblem.run_optimization(max_iter)
_____no_output_____
BSD-3-Clause
manual/GPyOpt_reference_manual.ipynb
komorihi/GPyOpt
Now you can check the best found location $x^*$ by
myProblem.x_opt
_____no_output_____
BSD-3-Clause
manual/GPyOpt_reference_manual.ipynb
komorihi/GPyOpt
and the predicted value of $f$ at the optimum $x^*$ by
myProblem.fx_opt
_____no_output_____
BSD-3-Clause
manual/GPyOpt_reference_manual.ipynb
komorihi/GPyOpt
And that's it! Keep reading to learn how GPyOpt uses Bayesian Optimization to solve this and other optimization problems. You will also learn all the features and options that you can use to solve your problems efficiently. ===================================================================================================== 2. The Basics of Bayesian Optimization Bayesian optimization (BO) is a strategy for global optimization of black-box functions [(Snoek et al., 2012)](http://papers.nips.cc/paper/4522-practical-bayesian-optimization-of-machine-learning-algorithms.pdf). Let $f: {\mathcal X} \to R$ be an L-Lipschitz continuous function defined on a compact subset ${\mathcal X} \subseteq R^d$. We are interested in solving the global optimization problem of finding $$ x_{M} = \arg \min_{x \in {\mathcal X}} f(x). $$ We assume that $f$ is a *black-box* from which only perturbed evaluations of the type $y_i = f(x_i) + \epsilon_i$, with $\epsilon_i \sim\mathcal{N}(0,\psi^2)$, are available. The goal is to make a series of $x_1,\dots,x_N$ evaluations of $f$ such that the *cumulative regret* $$r_N= Nf(x_{M})- \sum_{n=1}^N f(x_n),$$ is minimized. Essentially, $r_N$ is minimized if we start evaluating $f$ at $x_{M}$ as soon as possible. There are two crucial ingredients in any Bayesian Optimization (BO) procedure. 1. Define a **prior probability measure** on $f$: this will capture our prior beliefs about $f$. The prior will be updated to a 'posterior' using the available data. 2. Define an **acquisition function** $acqu(x)$: this is a criterion used to decide where to sample next in order to gain the maximum information about the location of the global optimum of $f$. Every time a new data point is collected, the model is re-estimated and the acquisition function is optimized again, until convergence. Given a prior over the function $f$ and an acquisition function, a BO procedure will converge to the optimum of $f$ under some conditions [(Bull, 2011)](http://arxiv.org/pdf/1101.3501.pdf). 2.1 Prior probability measure on $f$: Gaussian processes A Gaussian process (GP) is a probability distribution over classes of functions, typically smooth, such that each linear finite-dimensional restriction is multivariate Gaussian [(Rasmussen and Williams, 2006)](http://www.gaussianprocess.org/gpml). GPs are fully parametrized by a mean $\mu(x)$ and a covariance function $k(x,x')$. Without loss of generality $\mu(x)$ is assumed to be zero. The covariance function $k(x,x')$ characterizes the smoothness and other properties of $f$. It is known as the kernel of the process and has to be continuous, symmetric and positive definite. A widely used kernel is the squared exponential, given by $$ k(x,x') = l \cdot \exp{ \left(-\frac{\|x-x'\|^2}{2\sigma^2}\right)} $$ where $\sigma^2$ and $l$ are positive parameters. To denote that $f$ is a sample from a GP with mean $\mu$ and covariance $k$ we write $$f(x) \sim \mathcal{GP}(\mu(x),k(x,x')).$$ For regression tasks, the most important feature of GPs is that process priors are conjugate to the likelihood from finitely many observations $y= (y_1,\dots,y_n)^T$ and $X =\{x_1,...,x_n\}$, $x_i\in \mathcal{X}$, of the form $y_i = f(x_i) + \epsilon_i$ where $\epsilon_i \sim \mathcal{N} (0,\sigma^2)$. We obtain the Gaussian posterior $f(x^*)|X, y, \theta \sim \mathcal{N}(\mu(x^*),\sigma^2(x^*))$, where $\mu(x^*)$ and $\sigma^2(x^*)$ have closed form. See [(Rasmussen and Williams, 2006)](http://www.gaussianprocess.org/gpml) for details.
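To make the Gaussian process prior more tangible, here is a minimal sketch (using GPy, which we have already imported, and matplotlib) that builds a squared exponential kernel and draws a few sample functions from the corresponding zero-mean GP prior; the variable names and parameter values are just for illustration.

import numpy as np
import matplotlib.pyplot as plt
import GPy

# squared exponential (RBF) kernel: 'variance' plays the role of l and
# 'lengthscale' the role of sigma in the formula above
kernel = GPy.kern.RBF(input_dim=1, variance=1.0, lengthscale=0.2)

# evaluate the covariance matrix k(x, x') on a grid and draw three functions
# from the corresponding zero-mean GP prior
X = np.linspace(0.0, 1.0, 200)[:, None]
K = kernel.K(X) + 1e-8 * np.eye(len(X))   # small jitter for numerical stability
samples = np.random.multivariate_normal(np.zeros(len(X)), K, size=3)

plt.plot(X, samples.T)
plt.title('Samples from a GP prior with a squared exponential kernel')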
2.2 Acquisition functions Acquisition functions are designed to represent our beliefs about the location of the optimum of $f(x)$. Denote by $\theta$ the parameters of the GP model and by $\{x_i,y_i\}$ the available sample. Three of the most common acquisition functions, all available in GPyOpt, are: * **Maximum probability of improvement (MPI)**: $$acqu_{MPI}(x;\{x_n,y_n\},\theta) = \Phi(\gamma(x)), \mbox{where}\ \gamma(x)=\frac{\mu(x;\{x_n,y_n\},\theta)-f(x_{best})-\psi}{\sigma(x;\{x_n,y_n\},\theta)}.$$ * **Expected improvement (EI)**: $$acqu_{EI}(x;\{x_n,y_n\},\theta) = \sigma(x;\{x_n,y_n\},\theta) \left(\gamma(x) \Phi(\gamma(x)) + N(\gamma(x);0,1)\right).$$ * **Upper confidence bound (UCB)**: $$acqu_{UCB}(x;\{x_n,y_n\},\theta) = -\mu(x;\{x_n,y_n\},\theta)+\psi\sigma(x;\{x_n,y_n\},\theta).$$ Here $\psi$ is a tunable parameter that helps to make the acquisition functions more flexible. In the case of the UCB, $\psi$ defines the balance between the importance we give to the mean and to the variance of the model. This is known as the **exploration/exploitation trade off**. (A short GPyOpt sketch showing how to select between these acquisition functions appears at the end of this section.) 2.3 Applications of Bayesian Optimization Bayesian Optimization has been applied to solve a wide range of problems. Among many others, some nice applications of Bayesian Optimization include: * Sensor networks (http://www.robots.ox.ac.uk/~parg/pubs/ipsn673-garnett.pdf), * Automatic algorithm configuration (http://www.cs.ubc.ca/labs/beta/Projects/SMAC/papers/11-LION5-SMAC.pdf), * Deep learning (http://www.mlss2014.com/files/defreitas_slides1.pdf), * Gene design (http://bayesopt.github.io/papers/paper5.pdf), * and many more! In the YouTube video below you can see Bayesian Optimization working in real time in a robotics example. [(Calandra et al. 2008)](http://www.ias.tu-darmstadt.de/uploads/Site/EditPublication/Calandra_LION8.pdf)
from IPython.display import YouTubeVideo YouTubeVideo('ualnbKfkc3Q')
_____no_output_____
BSD-3-Clause
manual/GPyOpt_reference_manual.ipynb
komorihi/GPyOpt
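Returning to the acquisition functions described in Section 2.2, selecting between them in GPyOpt is just a matter of the acquisition_type argument. Below is a minimal sketch, reusing myf, bounds and max_iter from Section 1; note that GPyOpt exposes the UCB criterion as 'LCB' (lower confidence bound), since the library minimizes.

# the same toy problem as in Section 1, solved with three different acquisition functions
myProblem_ei  = GPyOpt.methods.BayesianOptimization(myf, bounds, acquisition_type='EI')
myProblem_mpi = GPyOpt.methods.BayesianOptimization(myf, bounds, acquisition_type='MPI')
myProblem_lcb = GPyOpt.methods.BayesianOptimization(myf, bounds, acquisition_type='LCB')

for problem in (myProblem_ei, myProblem_mpi, myProblem_lcb):
    problem.run_optimization(max_iter)
    print(problem.x_opt, problem.fx_opt)   # best location and value found by each acquisition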
3. One dimensional example In this example we show how GPyOpt works in a one-dimensional example that is a bit more difficult than the one we analyzed in Section 1. Let's consider here the Forrester function $$f(x) =(6x-2)^2 \sin(12x-4)$$ defined on the interval $[0, 1]$. The minimum of this function is located at approximately $x_{min}=0.757$. The Forrester function is part of the benchmark of functions included in GPyOpt. To create the true function, the perturbed version and the boundaries of the problem, you need to run the following cell.
%pylab inline
import GPy
import GPyOpt

# Create the true and perturbed Forrester function and the boundaries of the problem
f_true = GPyOpt.objective_examples.experiments1d.forrester()           # true function
f_sim = GPyOpt.objective_examples.experiments1d.forrester(sd=.25)      # noisy version
bounds = [{'name': 'var_1', 'type': 'continuous', 'domain': (0,1)}]    # problem constraints
Populating the interactive namespace from numpy and matplotlib
BSD-3-Clause
manual/GPyOpt_reference_manual.ipynb
komorihi/GPyOpt
We plot the true Forrester function.
f_true.plot()
_____no_output_____
BSD-3-Clause
manual/GPyOpt_reference_manual.ipynb
komorihi/GPyOpt
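As a quick numerical sanity check of the minimum quoted above, we can evaluate the Forrester formula directly on a dense grid (a simple sketch, independent of GPyOpt).

import numpy as np

# evaluate f(x) = (6x-2)^2 * sin(12x-4) on a fine grid over [0, 1]
x = np.linspace(0, 1, 100001)
fx = (6 * x - 2) ** 2 * np.sin(12 * x - 4)

print(x[np.argmin(fx)], fx.min())   # roughly x ~ 0.757 with f(x) ~ -6.02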
As we did in Section 1, we need to create the GPyOpt object that will run the optimization. We specify the function, the boundaries, and the type of acquisition function to use.
# Creates the GPyOpt object with the model and acquisition function
seed(123)
myBopt = GPyOpt.methods.BayesianOptimization(f=f_true.f,            # function to optimize
                                             domain=bounds,         # box constraints of the problem
                                             acquisition_type='EI', # selects the Expected Improvement
                                             exact_feval = True)    # evaluations of f are noise-free
_____no_output_____
BSD-3-Clause
manual/GPyOpt_reference_manual.ipynb
komorihi/GPyOpt
Now we want to run the optimization. Apart from the number of iterations, you can select how you want to optimize the acquisition function. You can run a number of local optimizers (acqu_optimize_restart), starting them at random or on a grid (acqu_optimize_method).
# Run the optimization
max_iter = 15       # evaluation budget
max_time = 60       # time budget
eps      = 10e-6    # minimum allowed distance between the last two observations

myBopt.run_optimization(max_iter, max_time, eps)
_____no_output_____
BSD-3-Clause
manual/GPyOpt_reference_manual.ipynb
komorihi/GPyOpt
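Once the optimization has finished, it is worth inspecting what the optimizer actually did. Here is a minimal sketch using GPyOpt's built-in diagnostic plots (plot_acquisition and plot_convergence) together with the attributes we met in Section 1.

# GP posterior mean/variance and the acquisition function over the domain
myBopt.plot_acquisition()

# distance between consecutive evaluations and the best value found so far
myBopt.plot_convergence()

print(myBopt.x_opt, myBopt.fx_opt)   # best location found and its value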