``` import sys sys.path.append("..") import numpy as np np.seterr(divide="ignore") import logging import pickle import glob from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score from sklearn.preprocessing import RobustScaler from sklearn.utils import check_random_state from scipy import interp from recnn.preprocessing import rewrite_content from recnn.preprocessing import permute_by_pt from recnn.preprocessing import extract from recnn.preprocessing import sequentialize_by_pt from recnn.preprocessing import randomize %matplotlib inline import matplotlib.pyplot as plt plt.rcParams["figure.figsize"] = (6, 6) ``` # Plotting functions ``` from recnn.preprocessing import sequentialize_by_pt def load_tf(filename_train, preprocess=None, n_events_train=-1): # Make training data print("Loading training data...") fd = open(filename_train, "rb") X, y = pickle.load(fd) fd.close() y = np.array(y) if n_events_train > 0: indices = check_random_state(123).permutation(len(X))[:n_events_train] X = [X[i] for i in indices] y = y[indices] print("\tfilename = %s" % filename_train) print("\tX size = %d" % len(X)) print("\ty size = %d" % len(y)) # Preprocessing print("Preprocessing...") X = [rewrite_content(jet) for jet in X] if preprocess: X = [preprocess(jet) for jet in X] X = [extract(permute_by_pt(jet)) for jet in X] tf = RobustScaler().fit(np.vstack([jet["content"] for jet in X])) return tf def load_test(tf, filename_test, preprocess=None, cropping=True): # Make test data print("Loading test data...") fd = open(filename_test, "rb") X, y = pickle.load(fd) fd.close() y = np.array(y) print("\tfilename = %s" % filename_test) print("\tX size = %d" % len(X)) print("\ty size = %d" % len(y)) # Preprocessing print("Preprocessing...") X = [rewrite_content(jet) for jet in X] if preprocess: X = [preprocess(jet) for jet in X] X = [extract(permute_by_pt(jet)) for jet in X] for jet in X: jet["content"] = tf.transform(jet["content"]) if not cropping: return X, y # Cropping X_ = [j for j in X if 250 < j["pt"] < 300 and 50 < j["mass"] < 110] y_ = [y[i] for i, j in enumerate(X) if 250 < j["pt"] < 300 and 50 < j["mass"] < 110] X = X_ y = y_ y = np.array(y) print("\tX size = %d" % len(X)) print("\ty size = %d" % len(y)) # Weights for flatness in pt w = np.zeros(len(y)) X0 = [X[i] for i in range(len(y)) if y[i] == 0] pdf, edges = np.histogram([j["pt"] for j in X0], density=True, range=[250, 300], bins=50) pts = [j["pt"] for j in X0] indices = np.searchsorted(edges, pts) - 1 inv_w = 1. / pdf[indices] inv_w /= inv_w.sum() w[y==0] = inv_w X1 = [X[i] for i in range(len(y)) if y[i] == 1] pdf, edges = np.histogram([j["pt"] for j in X1], density=True, range=[250, 300], bins=50) pts = [j["pt"] for j in X1] indices = np.searchsorted(edges, pts) - 1 inv_w = 1. 
/ pdf[indices] inv_w /= inv_w.sum() w[y==1] = inv_w return X, y, w from recnn.recnn import grnn_transform_simple from recnn.recnn import grnn_predict_simple from recnn.recnn import grnn_predict_gated from recnn.recnn import grnn_predict_simple_join def predict(X, filename, func=grnn_predict_simple): fd = open(filename, "rb") params = pickle.load(fd) fd.close() y_pred = func(params, X) return y_pred def evaluate_models(X, y, w, pattern, func=grnn_predict_simple): rocs = [] fprs = [] tprs = [] for filename in glob.glob(pattern): print("Loading %s" % filename), y_pred = predict(X, filename, func=func) # Roc rocs.append(roc_auc_score(y, y_pred, sample_weight=w)) fpr, tpr, _ = roc_curve(y, y_pred, sample_weight=w) fprs.append(fpr) tprs.append(tpr) print("ROC AUC = %.4f" % rocs[-1]) print("Mean ROC AUC = %.4f" % np.mean(rocs)) return rocs, fprs, tprs def build_rocs(prefix_train, prefix_test, model_pattern, preprocess=None, gated=False): tf = load_tf("../data/w-vs-qcd/final/%s-train.pickle" % prefix_train, preprocess=preprocess) X, y, w = load_test(tf, "../data/w-vs-qcd/final/%s-test.pickle" % prefix_test, preprocess=preprocess) if not gated: rocs, fprs, tprs = evaluate_models(X, y, w, "../models/jet-study-2/model-w-s-%s-[0-9]*.pickle" % model_pattern) else: rocs, fprs, tprs = evaluate_models(X, y, w, "../models/jet-study-2/model-w-g-%s-[0-9]*.pickle" % model_pattern, func=grnn_predict_gated) return rocs, fprs, tprs def remove_outliers(rocs, fprs, tprs): inv_fprs = [] base_tpr = np.linspace(0.05, 1, 476) for fpr, tpr in zip(fprs, tprs): inv_fpr = interp(base_tpr, tpr, 1. / fpr) inv_fprs.append(inv_fpr) inv_fprs = np.array(inv_fprs) scores = inv_fprs[:, 225] p25 = np.percentile(scores, 1 / 6. * 100.) p75 = np.percentile(scores, 5 / 6. * 100) robust_mean = np.mean([scores[i] for i in range(len(scores)) if p25 <= scores[i] <= p75]) robust_std = np.std([scores[i] for i in range(len(scores)) if p25 <= scores[i] <= p75]) indices = [i for i in range(len(scores)) if robust_mean - 3*robust_std <= scores[i] <= robust_mean + 3*robust_std] new_r, new_f, new_t = [], [], [] for i in indices: new_r.append(rocs[i]) new_f.append(fprs[i]) new_t.append(tprs[i]) return new_r, new_f, new_t def report_score(rocs, fprs, tprs, label, latex=False, input="particles", short=False): inv_fprs = [] base_tpr = np.linspace(0.05, 1, 476) for fpr, tpr in zip(fprs, tprs): inv_fpr = interp(base_tpr, tpr, 1. / fpr) inv_fprs.append(inv_fpr) inv_fprs = np.array(inv_fprs) mean_inv_fprs = inv_fprs.mean(axis=0) if not latex: print("%32s\tROC AUC=%.4f+-%.2f\t1/FPR@TPR=0.5=%.2f+-%.2f" % (label, np.mean(rocs), np.std(rocs), np.mean(inv_fprs[:, 225]), np.std(inv_fprs[:, 225]))) else: if not short: print("%10s \t& %30s \t& %.4f $\pm$ %.4f \t& %.1f $\pm$ %.1f \\\\" % (input, label, np.mean(rocs), np.std(rocs), np.mean(inv_fprs[:, 225]), np.std(inv_fprs[:, 225]))) else: print("%30s \t& %.4f $\pm$ %.4f \t& %.1f $\pm$ %.1f \\\\" % (label, np.mean(rocs), np.std(rocs), np.mean(inv_fprs[:, 225]), np.std(inv_fprs[:, 225]))) def plot_rocs(rocs, fprs, tprs, label="", color="r", show_all=False): inv_fprs = [] base_tpr = np.linspace(0.05, 1, 476) for fpr, tpr in zip(fprs, tprs): inv_fpr = interp(base_tpr, tpr, 1. 
/ fpr) inv_fprs.append(inv_fpr) if show_all: plt.plot(base_tpr, inv_fpr, alpha=0.1, color=color) inv_fprs = np.array(inv_fprs) mean_inv_fprs = inv_fprs.mean(axis=0) plt.plot(base_tpr, mean_inv_fprs, color, label="%s" % label) def plot_show(filename=None): plt.xlabel("Signal efficiency") plt.ylabel("1 / Background efficiency") plt.xlim([0.1, 1.0]) plt.ylim(1, 500) plt.yscale("log") plt.legend(loc="best") plt.grid() if filename: plt.savefig(filename) plt.show() ``` # Count parameters ``` def count(params): def _count(thing): if isinstance(thing, list): c = 0 for stuff in thing: c += _count(stuff) return c elif isinstance(thing, np.ndarray): return np.prod(thing.shape) c = 0 for k, v in params.items(): c += _count(v) return c # Simple vs gated fd = open("../models/jet-study-2/model-w-s-antikt-kt-1.pickle", "rb") params = pickle.load(fd) fd.close() print("Simple =", count(params)) fd = open("../models/jet-study-2/model-w-g-antikt-kt-1.pickle", "rb") params = pickle.load(fd) fd.close() print("Gated =", count(params)) # double # Simple vs gated fd = open("../models/jet-study-2/model-w-sd-antikt-kt-1.pickle", "rb") params = pickle.load(fd) fd.close() print("Simple =", count(params)) fd = open("../models/jet-study-2/model-w-gd-antikt-kt-1.pickle", "rb") params = pickle.load(fd) fd.close() print("Gated =", count(params)) ``` # Embedding visualization ``` prefix_train = "antikt-kt" prefix_test = prefix_train tf = load_tf("../data/w-vs-qcd/final/%s-train.pickle" % prefix_train) X, y, w = load_test(tf, "../data/w-vs-qcd/final/%s-test.pickle" % prefix_test) fd = open("../models/jet-study-2/model-w-s-antikt-kt-1.pickle", "rb") params = pickle.load(fd) fd.close() Xt = grnn_transform_simple(params, X[:5000]) from sklearn.manifold import TSNE Xtt = TSNE(n_components=2).fit_transform(Xt) for i in range(5000): plt.scatter(Xtt[i, 0], Xtt[i, 1], color="b" if y[i] == 1 else "r", alpha=0.5) plt.show() from sklearn.decomposition import PCA Xtt = PCA(n_components=2).fit_transform(Xt) for i in range(5000): plt.scatter(Xtt[i, 0], Xtt[i, 1], color="b" if y[i] == 1 else "r", alpha=0.5) plt.show() ``` # Generate all ROCs ``` for pattern, gated in [ # Simple ## Particles ("antikt-kt", False), ("antikt-cambridge", False), ("antikt-antikt", False), ("antikt-random", False), ("antikt-seqpt", False), ("antikt-seqpt-reversed", False), ## Towers ("antikt-kt-delphes", False), ("antikt-cambridge-delphes", False), ("antikt-antikt-delphes", False), ("antikt-random-delphes", False), ("antikt-seqpt-delphes", False), ("antikt-seqpt-reversed-delphes", False), ## Images ("antikt-kt-images", False), # Gated ## Particles ("antikt-kt", True), ("antikt-antikt", True), ("antikt-seqpt", True), ("antikt-seqpt-reversed", True), ("antikt-cambridge", True), ("antikt-random", True), ## Towers ("antikt-kt-delphes", True), ("antikt-antikt-delphes", True), ("antikt-seqpt-delphes", True), ("antikt-seqpt-reversed-delphes", True), ("antikt-cambridge-delphes", True), ("antikt-random-delphes", True), ## Images ("antikt-kt-images", True) ]: r, f, t = build_rocs(pattern, pattern, pattern, gated=gated) # Save fd = open("../models/jet-study-2/rocs/rocs-%s-%s.pickle" % ("s" if not gated else "g", pattern), "wb") pickle.dump((r, f, t), fd) fd.close() # sd/gd == contatenate embeddings of h1_L + h1_R for pattern, gated in [ # Simple ## Particles ("antikt-kt", False), ## Towers ("antikt-kt-delphes", False), ## Images ("antikt-kt-images", False), # Gated ## Particles ("antikt-kt", True), ## Towers ("antikt-kt-delphes", True), ## Images ("antikt-kt-images", True) 
]: r, f, t = build_rocs(pattern, pattern, pattern, gated=gated) # Save fd = open("../models/jet-study-2/rocs/rocs-%s-%s.pickle" % ("sd" if not gated else "gd", pattern), "wb") pickle.dump((r, f, t), fd) fd.close() ``` # Table ``` for pattern, gated, label in [ # Simple ## Particles ("antikt-kt", False, "RNN $k_t$"), ("antikt-cambridge", False, "RNN C/A"), ("antikt-antikt", False, "RNN anti-$k_t$"), ("antikt-random", False, "RNN random"), ("antikt-seqpt", False, "RNN asc-$p_T$"), ("antikt-seqpt-reversed", False, "RNN desc-$p_T$"), ## Towers ("antikt-kt-delphes", False, "RNN $k_t$"), ("antikt-cambridge-delphes", False, "RNN C/A"), ("antikt-antikt-delphes", False, "RNN anti-$k_t$"), ("antikt-random-delphes", False, "RNN random"), ("antikt-seqpt-delphes", False, "RNN asc-$p_T$"), ("antikt-seqpt-reversed-delphes", False, "RNN desc-$p_T$"), ## Images ("antikt-kt-images", False, "RNN $k_t$"), # Gated ## Particles ("antikt-kt", True, "RNN $k_t$ (gated)"), ("antikt-cambridge", True, "RNN C/A (gated)"), ("antikt-antikt", True, "RNN anti-$k_t$ (gated)"), ("antikt-random", True, "RNN random (gated)"), ("antikt-seqpt", True, "RNN asc-$p_T$ (gated)"), ("antikt-seqpt-reversed", True, "RNN desc-$p_T$ (gated)"), ## Towers ("antikt-kt-delphes", True, "RNN $k_t$ (gated)"), ("antikt-cambridge-delphes", True, "RNN C/A (gated)"), ("antikt-antikt-delphes", True, "RNN anti-$k_t$ (gated)"), ("antikt-random-delphes", True, "RNN random (gated)"), ("antikt-seqpt-delphes", True, "RNN asc-$p_T$ (gated)"), ("antikt-seqpt-reversed-delphes", True, "RNN desc-$p_T$ (gated)"), # Images ("antikt-kt-images", False, "RNN $k_t$"), ("antikt-kt-images", True, "RNN $k_t$ (gated)") ]: fd = open("../models/jet-study-2/rocs/rocs-%s-%s.pickle" % ("s" if not gated else "g", pattern), "rb") r, f, t = pickle.load(fd) fd.close() r, f, t = remove_outliers(r, f, t) report_score(r, f, t, label=label, latex=True, input="particles" if "delphes" not in pattern and "images" not in pattern else "towers") for pattern, gated, label in [ # Simple ## Particles ("antikt-kt", False, "RNN $k_t$"), ## Towers ("antikt-kt-delphes", False, "RNN $k_t$"), ## Images ("antikt-kt-images", False, "RNN $k_t$"), # Gated ## Particles ("antikt-kt", True, "RNN $k_t$ (gated)"), ## Towers ("antikt-kt-delphes", True, "RNN $k_t$ (gated)"), # Images ("antikt-kt-images", True, "RNN $k_t$ (gated)") ]: fd = open("../models/jet-study-2/rocs/rocs-%s-%s.pickle" % ("sd" if not gated else "gd", pattern), "rb") r, f, t = pickle.load(fd) fd.close() r, f, t = remove_outliers(r, f, t) report_score(r, f, t, label=label, latex=True, input="particles" if "delphes" not in pattern and "images" not in pattern else "towers") ``` # Plots ``` # Simple vs gated for pattern, gated, label, color in [ ("antikt-kt", False, "RNN $k_t$ (simple)", "r"), ("antikt-kt", True, "RNN $k_t$ (gated)", "b") ]: fd = open("../models/jet-study-2/rocs/rocs-%s-%s.pickle" % ("s" if not gated else "g", pattern), "rb") r, f, t = pickle.load(fd) fd.close() r, f, t = remove_outliers(r, f, t) plot_rocs(r, f, t, label=label, color=color) report_score(r, f, t, label=label) plot_show() # Topologies (particles, simple) for pattern, gated, label, color in [ ("antikt-kt", False, "$k_t$", "r"), ("antikt-cambridge", False, "C/A", "g"), ("antikt-antikt", False, "anti-$k_t$", "b"), ("antikt-seqpt", False, "asc-$p_T$", "c"), ("antikt-seqpt-reversed", False, "desc-$p_T$", "m"), ("antikt-random", False, "random", "orange") ]: fd = open("../models/jet-study-2/rocs/rocs-%s-%s.pickle" % ("s" if not gated else "g", pattern), "rb") r, f, t 
= pickle.load(fd) fd.close() r, f, t = remove_outliers(r, f, t) plot_rocs(r, f, t, label=label, color=color) report_score(r, f, t, label=label) plot_show() # Topologies (towers, simple) for pattern, gated, label, color in [ ("antikt-kt-delphes", False, "RNN $k_t$", "r"), ("antikt-cambridge-delphes", False, "RNN C/A", "g"), ("antikt-antikt-delphes", False, "RNN anti-$k_t$", "b"), ("antikt-seqpt-delphes", False, "RNN asc-$p_T$", "c"), ("antikt-seqpt-reversed-delphes", False, "RNN desc-$p_T$", "m"), ("antikt-random-delphes", False, "RNN random", "orange") ]: fd = open("../models/jet-study-2/rocs/rocs-%s-%s.pickle" % ("s" if not gated else "g", pattern), "rb") r, f, t = pickle.load(fd) fd.close() r, f, t = remove_outliers(r, f, t) plot_rocs(r, f, t, label=label, color=color) report_score(r, f, t, label=label) plot_show() # Topologies (particles, gated) for pattern, gated, label, color in [ ("antikt-kt", True, "RNN $k_t$", "r"), ("antikt-antikt", True, "RNN anti-$k_t$", "b"), ("antikt-seqpt", True, "RNN asc-$p_T$", "c"), ("antikt-seqpt-reversed", True, "RNN desc-$p_T$", "m"), ]: fd = open("../models/jet-study-2/rocs/rocs-%s-%s.pickle" % ("s" if not gated else "g", pattern), "rb") r, f, t = pickle.load(fd) fd.close() r, f, t = remove_outliers(r, f, t) plot_rocs(r, f, t, label=label, color=color) report_score(r, f, t, label=label) plot_show() # Topologies (towers, gated) for pattern, gated, label, color in [ ("antikt-kt-delphes", True, "RNN $k_t$", "r"), ("antikt-antikt-delphes", True, "RNN anti-$k_t$", "b"), ("antikt-seqpt-delphes", True, "RNN asc-$p_T$", "c"), ("antikt-seqpt-reversed-delphes", True, "RNN desc-$p_T$", "m"), ]: fd = open("../models/jet-study-2/rocs/rocs-%s-%s.pickle" % ("s" if not gated else "g", pattern), "rb") r, f, t = pickle.load(fd) fd.close() r, f, t = remove_outliers(r, f, t) plot_rocs(r, f, t, label=label, color=color) report_score(r, f, t, label=label) plot_show() # Particles vs towers vs images (simple) for pattern, gated, label, color in [ ("antikt-kt", False, "particles", "r"), ("antikt-kt-delphes", False, "towers", "g"), ("antikt-kt-images", False, "images", "b"), ]: fd = open("../models/jet-study-2/rocs/rocs-%s-%s.pickle" % ("s" if not gated else "g", pattern), "rb") r, f, t = pickle.load(fd) fd.close() r, f, t = remove_outliers(r, f, t) plot_rocs(r, f, t, label=label, color=color) report_score(r, f, t, label=label) plot_show(filename="particles-towers-images.pdf") # Particles vs towers vs images (gated) for pattern, gated, label, color in [ ("antikt-kt", True, "particles", "r"), ("antikt-kt-delphes", True, "towers", "g"), ("antikt-kt-images", True, "images", "b"), ]: fd = open("../models/jet-study-2/rocs/rocs-%s-%s.pickle" % ("s" if not gated else "g", pattern), "rb") r, f, t = pickle.load(fd) fd.close() r, f, t = remove_outliers(r, f, t) plot_rocs(r, f, t, label=label, color=color) report_score(r, f, t, label=label) plot_show() ``` # Trimming ``` for pattern_train, pattern_test, gated in [ ("antikt-kt", "antikt-kt", False), ("antikt-kt", "antikt-kt-trimmed", False), ("antikt-kt-trimmed", "antikt-kt-trimmed", False), ("antikt-kt-trimmed", "antikt-kt", False), ]: r, f, t = build_rocs(pattern_train, pattern_test, pattern_train, gated=gated) # Save fd = open("../models/jet-study-2/rocs/rocs-%s-%s-%s.pickle" % ("s" if not gated else "g", pattern_train, pattern_test), "wb") pickle.dump((r, f, t), fd) fd.close() for pattern_train, pattern_test, gated, label, color in [ ("antikt-kt", "antikt-kt", False, "$k_t$ on $k_t$", "b"), ("antikt-kt", "antikt-kt-trimmed", False, 
"$k_t$ on $k_t$-trimmed", "c"), ("antikt-kt-trimmed", "antikt-kt-trimmed", False, "$k_t$-trimmed on $k_t$-trimmed", "r"), ("antikt-kt-trimmed", "antikt-kt", False, "$k_t$-trimmed on $k_t$", "orange"), ]: fd = open("../models/jet-study-2/rocs/rocs-%s-%s-%s.pickle" % ("s" if not gated else "g", pattern_train, pattern_test), "rb") r, f, t = pickle.load(fd) fd.close() r, f, t = remove_outliers(r, f, t) plot_rocs(r, f, t, label=label, color=color) report_score(r, f, t, label=label) plot_show() ``` # Colinear splits ``` from functools import partial from recnn.preprocessing import sequentialize_by_pt preprocess_seqpt = partial(sequentialize_by_pt, reverse=False) preprocess_seqpt_rev = partial(sequentialize_by_pt, reverse=True) for pattern_train, pattern_test, gated, preprocess in [ # kt ("antikt-kt", "antikt-kt-colinear1", False, None), ("antikt-kt", "antikt-kt-colinear10", False, None), ("antikt-kt", "antikt-kt-colinear1-max", False, None), ("antikt-kt", "antikt-kt-colinear10-max", False, None), # asc-pt ("antikt-seqpt", "antikt-kt-colinear1", False, preprocess_seqpt), ("antikt-seqpt", "antikt-kt-colinear10", False, preprocess_seqpt), ("antikt-seqpt", "antikt-kt-colinear1-max", False, preprocess_seqpt), ("antikt-seqpt", "antikt-kt-colinear10-max", False, preprocess_seqpt), # desc-pt ("antikt-seqpt-reversed", "antikt-kt-colinear1", False, preprocess_seqpt_rev), ("antikt-seqpt-reversed", "antikt-kt-colinear10", False, preprocess_seqpt_rev), ("antikt-seqpt-reversed", "antikt-kt-colinear1-max", False, preprocess_seqpt_rev), ("antikt-seqpt-reversed", "antikt-kt-colinear10-max", False, preprocess_seqpt_rev), ]: r, f, t = build_rocs(pattern_train, pattern_test, pattern_train, gated=gated, preprocess=preprocess) # Save fd = open("../models/jet-study-2/rocs/rocs-%s-%s-%s.pickle" % ("s" if not gated else "g", pattern_train, pattern_test), "wb") pickle.dump((r, f, t), fd) fd.close() for pattern_train, pattern_test, gated, label in [ # kt ("antikt-kt", "antikt-kt-colinear1", False, "$k_t$ colinear1"), ("antikt-kt", "antikt-kt-colinear10", False, "$k_t$ colinear10"), ("antikt-kt", "antikt-kt-colinear1-max", False, "$k_t$ colinear1-max"), ("antikt-kt", "antikt-kt-colinear10-max", False, "$k_t$ colinear10-max"), # asc-pt ("antikt-seqpt", "antikt-kt-colinear1", False, "asc-$p_T$ colinear1"), ("antikt-seqpt", "antikt-kt-colinear10", False, "asc-$p_T$ colinear10"), ("antikt-seqpt", "antikt-kt-colinear1-max", False, "asc-$p_T$ colinear1-max"), ("antikt-seqpt", "antikt-kt-colinear10-max", False, "asc-$p_T$ colinear10-max"), # desc-pt ("antikt-seqpt-reversed", "antikt-kt-colinear1", False, "desc-$p_T$ colinear1"), ("antikt-seqpt-reversed", "antikt-kt-colinear10", False, "desc-$p_T$ colinear10"), ("antikt-seqpt-reversed", "antikt-kt-colinear1-max", False, "desc-$p_T$ colinear1-max"), ("antikt-seqpt-reversed", "antikt-kt-colinear10-max", False, "desc-$p_T$ colinear10-max"), ]: fd = open("../models/jet-study-2/rocs/rocs-%s-%s-%s.pickle" % ("s" if not gated else "g", pattern_train, pattern_test), "rb") r, f, t = pickle.load(fd) fd.close() r, f, t = remove_outliers(r, f, t) report_score(r, f, t, label=label, latex=True, short=True) ``` # Soft particles ``` from functools import partial from recnn.preprocessing import sequentialize_by_pt preprocess_seqpt = partial(sequentialize_by_pt, reverse=False) preprocess_seqpt_rev = partial(sequentialize_by_pt, reverse=True) for pattern_train, pattern_test, gated, preprocess in [ ("antikt-kt", "antikt-kt-soft", False, None), ("antikt-seqpt", "antikt-kt-soft", False, 
preprocess_seqpt), ("antikt-seqpt-reversed", "antikt-kt-soft", False, preprocess_seqpt_rev), ]: r, f, t = build_rocs(pattern_train, pattern_test, pattern_train, gated=gated, preprocess=preprocess) # Save fd = open("../models/jet-study-2/rocs/rocs-%s-%s-%s.pickle" % ("s" if not gated else "g", pattern_train, pattern_test), "wb") pickle.dump((r, f, t), fd) fd.close() for pattern_train, pattern_test, gated, label in [ ("antikt-kt", "antikt-kt-soft", False, "$k_t$ soft"), ("antikt-seqpt", "antikt-kt-soft", False, "asc-$p_T$ soft"), ("antikt-seqpt-reversed", "antikt-kt-soft", False, "desc-$p_T$ soft"), ]: fd = open("../models/jet-study-2/rocs/rocs-%s-%s-%s.pickle" % ("s" if not gated else "g", pattern_train, pattern_test), "rb") r, f, t = pickle.load(fd) fd.close() r, f, t = remove_outliers(r, f, t) report_score(r, f, t, label=label, latex=True, short=True) ``` # Learning curve ``` for pattern, gated, n_events in [ # ("antikt-kt", False, 6000), # ("antikt-seqpt-reversed", False, 6000), ("antikt-kt", True, 6000), ("antikt-seqpt-reversed", True, 6000), # ("antikt-kt", False, 15000), # ("antikt-seqpt-reversed", False, 15000), ("antikt-kt", True, 15000), ("antikt-seqpt-reversed", True, 15000), ]: tf = load_tf("../data/w-vs-qcd/final/%s-train.pickle" % pattern, n_events_train=n_events) X, y, w = load_test(tf, "../data/w-vs-qcd/final/%s-test.pickle" % pattern) if not gated: rocs, fprs, tprs = evaluate_models(X, y, w, "../models/jet-study-2/model-w-s-%s-%d-[0-9]*.pickle" % (pattern, n_events)) else: rocs, fprs, tprs = evaluate_models(X, y, w, "../models/jet-study-2/model-w-g-%s-%d-[0-9]*.pickle" % (pattern, n_events), func=grnn_predict_gated) # Save fd = open("../models/jet-study-2/rocs/rocs-%s-%s-%d.pickle" % ("s" if not gated else "g", pattern, n_events), "wb") pickle.dump((rocs, fprs, tprs), fd) fd.close() for pattern, label, color in [ ("s-antikt-kt", "$k_t$ 100k", "r"), ("s-antikt-kt-15000", "$k_t$ 10k", "g"), ("s-antikt-kt-6000", "$k_t$ 1k", "b"), ("s-antikt-seqpt-reversed", "desc-$p_T$ 100k", "r--"), ("s-antikt-seqpt-reversed-15000", "desc-$p_T$ 10k", "g--"), ("s-antikt-seqpt-reversed-6000", "desc-$p_T$ 1k", "b--"), ]: fd = open("../models/jet-study-2/rocs/rocs-%s.pickle" % pattern, "rb") r, f, t = pickle.load(fd) fd.close() r, f, t = remove_outliers(r, f, t) plot_rocs(r, f, t, label=label, color=color) report_score(r, f, t, label=label) plot_show() for pattern, label, color in [ ("g-antikt-kt", "$k_t$ 100k", "r"), ("g-antikt-kt-15000", "$k_t$ 10k", "g"), ("g-antikt-kt-6000", "$k_t$ 1k", "b"), ("g-antikt-seqpt-reversed", "desc-$p_T$ 100k", "r--"), ("g-antikt-seqpt-reversed-15000", "desc-$p_T$ 10k", "g--"), ("g-antikt-seqpt-reversed-6000", "desc-$p_T$ 1k", "b--"), ]: fd = open("../models/jet-study-2/rocs/rocs-%s.pickle" % pattern, "rb") r, f, t = pickle.load(fd) fd.close() r, f, t = remove_outliers(r, f, t) plot_rocs(r, f, t, label=label, color=color) report_score(r, f, t, label=label) plot_show() ``` # Tau21 ``` import h5py f = h5py.File("../data/w-vs-qcd/h5/w_100000_j1p0_sj0p30_delphes_jets_images.h5", "r")["auxvars"] tau1 = f["tau_1"] tau2 = f["tau_2"] tau21 = np.true_divide(tau2, tau1) pt = f["pt_trimmed"] mass = f["mass_trimmed"] mask = (f["mass_trimmed"] < 110) & (f["mass_trimmed"] > 50) & (f["pt_trimmed"] < 300) & (f["pt_trimmed"] > 250) #mask = mask & np.isfinite(tau21) & (tau21 != 0.) 
signal_tau21 = tau21[mask]
signal_pt = pt[mask]
signal_mass = mass[mask]

f = h5py.File("../data/w-vs-qcd/h5/qcd_100000_j1p0_sj0p30_delphes_jets_images.h5", "r")["auxvars"]
tau1 = f["tau_1"]
tau2 = f["tau_2"]
tau21 = np.true_divide(tau2, tau1)
pt = f["pt_trimmed"]
mass = f["mass_trimmed"]
mask = (f["mass_trimmed"] < 110) & (f["mass_trimmed"] > 50) & (f["pt_trimmed"] < 300) & (f["pt_trimmed"] > 250)
#mask = mask & np.isfinite(tau21) & (tau21 != 0.)
bkg_tau21 = tau21[mask]
bkg_pt = pt[mask]
bkg_mass = mass[mask]

plt.hist(bkg_mass, histtype="step", bins=40, normed=1)
plt.hist(signal_mass, histtype="step", bins=40, normed=1)

tau21 = np.concatenate((signal_tau21, bkg_tau21))
pts = np.concatenate((signal_pt, bkg_pt))
masss = np.concatenate((signal_mass, bkg_mass))
X = np.hstack([tau21.reshape(-1,1), masss.reshape(-1,1)])
y = np.concatenate((np.ones(len(signal_tau21)), np.zeros(len(bkg_tau21))))
w = np.zeros(len(y))

pdf, edges = np.histogram(pts[y == 0], density=True, range=[250, 300], bins=50)
indices = np.searchsorted(edges, pts[y == 0]) - 1
inv_w = 1. / pdf[indices]
inv_w /= inv_w.sum()
w[y==0] = inv_w

pdf, edges = np.histogram(pts[y == 1], density=True, range=[250, 300], bins=50)
indices = np.searchsorted(edges, pts[y == 1]) - 1
inv_w = 1. / pdf[indices]
inv_w /= inv_w.sum()
w[y==1] = inv_w

# Imports needed below (they are not loaded in the notebook's first cell).
from sklearn.model_selection import train_test_split
from sklearn.ensemble import ExtraTreesClassifier

X_train, X_test, y_train, y_test, w_train, w_test = train_test_split(X, y, w, train_size=0.5)

def evaluate_models(X, y, w):
    rocs = []
    fprs = []
    tprs = []

    y_pred = X

    # Roc
    rocs.append(roc_auc_score(y, y_pred, sample_weight=w))
    fpr, tpr, _ = roc_curve(y, y_pred, sample_weight=w)
    fprs.append(fpr)
    tprs.append(tpr)

    return rocs, fprs, tprs

r, f, t = evaluate_models(-tau21, y, w)
plot_rocs(r, f, t, label="tau21")
report_score(r, f, t, label="tau21")

r, f, t = evaluate_models(masss, y, w)
plot_rocs(r, f, t, label="mass")
report_score(r, f, t, label="mass")

plot_show()

clf = ExtraTreesClassifier(n_estimators=1000, min_samples_leaf=100, max_features=1)
clf.fit(X_train, y_train)

r, f, t = evaluate_models(-clf.predict_proba(X_test)[:, 0], y_test, w_test)
plot_rocs(r, f, t, label="tau21+mass")
report_score(r, f, t, label="tau21+mass")
plot_show()
```
``` import numpy as np from keras.models import Model from keras.layers import Input from keras.layers.pooling import AveragePooling3D from keras import backend as K import json from collections import OrderedDict def format_decimal(arr, places=6): return [round(x * 10**places) / 10**places for x in arr] DATA = OrderedDict() ``` ### AveragePooling3D **[pooling.AveragePooling3D.0] input 4x4x4x2, pool_size=(2, 2, 2), strides=None, padding='valid', data_format='channels_last'** ``` data_in_shape = (4, 4, 4, 2) L = AveragePooling3D(pool_size=(2, 2, 2), strides=None, padding='valid', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(290) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.0'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[pooling.AveragePooling3D.1] input 4x4x4x2, pool_size=(2, 2, 2), strides=(1, 1, 1), padding='valid', data_format='channels_last'** ``` data_in_shape = (4, 4, 4, 2) L = AveragePooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='valid', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(291) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.1'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[pooling.AveragePooling3D.2] input 4x5x2x3, pool_size=(2, 2, 2), strides=(2, 1, 1), padding='valid', data_format='channels_last'** ``` data_in_shape = (4, 5, 2, 3) L = AveragePooling3D(pool_size=(2, 2, 2), strides=(2, 1, 1), padding='valid', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(282) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.2'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[pooling.AveragePooling3D.3] input 4x4x4x2, pool_size=(3, 3, 3), strides=None, padding='valid', data_format='channels_last'** ``` 
data_in_shape = (4, 4, 4, 2) L = AveragePooling3D(pool_size=(3, 3, 3), strides=None, padding='valid', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(283) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.3'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[pooling.AveragePooling3D.4] input 4x4x4x2, pool_size=(3, 3, 3), strides=(3, 3, 3), padding='valid', data_format='channels_last'** ``` data_in_shape = (4, 4, 4, 2) L = AveragePooling3D(pool_size=(3, 3, 3), strides=(3, 3, 3), padding='valid', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(284) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.4'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[pooling.AveragePooling3D.5] input 4x4x4x2, pool_size=(2, 2, 2), strides=None, padding='same', data_format='channels_last'** ``` data_in_shape = (4, 4, 4, 2) L = AveragePooling3D(pool_size=(2, 2, 2), strides=None, padding='same', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(285) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.5'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[pooling.AveragePooling3D.6] input 4x4x4x2, pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same', data_format='channels_last'** ``` data_in_shape = (4, 4, 4, 2) L = AveragePooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(286) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = 
format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.6'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[pooling.AveragePooling3D.7] input 4x5x4x2, pool_size=(2, 2, 2), strides=(1, 2, 1), padding='same', data_format='channels_last'** ``` data_in_shape = (4, 5, 4, 2) L = AveragePooling3D(pool_size=(2, 2, 2), strides=(1, 2, 1), padding='same', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(287) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.7'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[pooling.AveragePooling3D.8] input 4x4x4x2, pool_size=(3, 3, 3), strides=None, padding='same', data_format='channels_last'** ``` data_in_shape = (4, 4, 4, 2) L = AveragePooling3D(pool_size=(3, 3, 3), strides=None, padding='same', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(288) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.8'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[pooling.AveragePooling3D.9] input 4x4x4x2, pool_size=(3, 3, 3), strides=(3, 3, 3), padding='same', data_format='channels_last'** ``` data_in_shape = (4, 4, 4, 2) L = AveragePooling3D(pool_size=(3, 3, 3), strides=(3, 3, 3), padding='same', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(289) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.9'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[pooling.AveragePooling3D.10] input 2x3x3x4, pool_size=(3, 3, 3), 
strides=(2, 2, 2), padding='valid', data_format='channels_first'** ``` data_in_shape = (2, 3, 3, 4) L = AveragePooling3D(pool_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', data_format='channels_first') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(290) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.10'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[pooling.AveragePooling3D.11] input 2x3x3x4, pool_size=(3, 3, 3), strides=(1, 1, 1), padding='same', data_format='channels_first'** ``` data_in_shape = (2, 3, 3, 4) L = AveragePooling3D(pool_size=(3, 3, 3), strides=(1, 1, 1), padding='same', data_format='channels_first') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(291) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.11'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[pooling.AveragePooling3D.12] input 3x4x4x3, pool_size=(2, 2, 2), strides=None, padding='valid', data_format='channels_first'** ``` data_in_shape = (3, 4, 4, 3) L = AveragePooling3D(pool_size=(2, 2, 2), strides=None, padding='valid', data_format='channels_first') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(292) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.12'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` ### export for Keras.js tests ``` print(json.dumps(DATA)) ```
# Debugging strategies

In this notebook, we'll talk about what happens when you get an error message (it will happen often!) and some steps you can take to resolve it. Run the code in the next cell.

```
x = 10

if x > 20
    print(f'{x} is greater than 20!')
```

The "traceback" message shows you a couple of useful things:

- What line the error is on: `line 3`
- The class of error: `SyntaxError` (v common)
- Exactly _where_ the error occurred -- see where the `^` symbol is pointing?

What's the problem?

#### Googling

If it's not immediately clear what's wrong -- if you're not even sure what a `SyntaxError` even is -- I might start by Googling the error message, the word "python" and maybe some keywords for what I was trying to do when I got the error. Something like [`"SyntaxError: invalid syntax" python if statement`](https://www.google.com/search?q=%22SyntaxError%3A+invalid+syntax%22+python+if+statement)

Click through the first couple of links -- you'll become _very_ familiar with StackOverflow -- and see if you spot the problem. If you're still stuck, maybe it's time to ...

#### Read the docs

My next stop would be the Python documentation to find some examples of the thing I'm trying to do. [Here's the page outlining how to write an `if` statement in Python](https://docs.python.org/3/tutorial/controlflow.html).

From there, I would copy the example code, run it, compare it line by line with my code and see what's different. If I'm _still_ stuck, I might see if there are other keywords to search on and take another run at Google.

#### Use `print()` liberally

The `print()` function can be a lifesaver -- it can show you _what_ a value is before you try to do something to it, and whether it matches up with your expectations of what that value should be, and thereby give you a clue about why your script is failing. An example can help clarify this idea.

**Scenario:** Your newsroom is handing out longevity bonuses. (Congratulations!) Each employee's bonus will be the number of years they've been with the company, times 50. So we're going to loop over our staff data, held in a list of dictionaries, and calculate each person's bonus.

```
staff = [
    {'name': 'Fran', 'years_of_service': 2, 'job': 'Reporter'},
    {'name': 'Graham', 'years_of_service': 7, 'job': 'Reporter'},
    {'name': 'Pat', 'years_of_service': 4, 'job': 'Web Producer'},
    {'name': 'John', 'years_of_service': '26', 'job': 'Managing Editor'},
    {'name': 'Sue', 'years_of_service': 33, 'job': 'Executive Editor'}
]

for person in staff:
    name = person['name']
    bonus = person['years_of_service'] * 50
    print(f'{name} is getting a bonus of {bonus}')
```

We didn't get an exception, but something is _clearly_ wrong with John's bonus. What's going on? Maybe you spot the error already. If not, we might Google something like ["python multiply numbers repeating"](https://www.google.com/search?q=python+multiply+numbers+repeating) -- which leads us to [this StackOverflow answer](https://stackoverflow.com/questions/20401871/want-to-multiply-not-repeat-variable). Is that what's going on here?

Let's add a `print()` statement before we do the multiplication and use the [`type()`](https://docs.python.org/3/library/functions.html#type) function to check the value that we're pulling out of each dictionary.

```
for person in staff:
    name = person['name']
    print(name, type(person['years_of_service']))
    bonus = person['years_of_service'] * 50
    print(f'{name} is getting a bonus of {bonus}')
```

Aha! John's value for `years_of_service` has been stored as a string, not an integer.
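A quick optional aside that is not part of the original notebook: once you suspect a type problem like this, you can also confirm it directly in code with `isinstance()` before deciding how to fix it. The sketch below reuses the `staff` list from the cell above.

```
# Confirming the suspicion: which staff records carry a string instead of an integer?
for person in staff:
    if isinstance(person['years_of_service'], str):
        print(person['name'], 'has a string value:', repr(person['years_of_service']))
```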
Let's fix that by using the [`int()`](https://docs.python.org/3/library/functions.html#int) function to coerce the value to an integer.

```
for person in staff:
    name = person['name']
    bonus = int(person['years_of_service']) * 50
    print(f'{name} is getting a bonus of {bonus}')
```

Winner winner, chicken dinner.

Here are some more debugging exercises for you to work through. See if you can figure out what's wrong and fix them.

```
print(Hello, Pittsburgh!)

desk = {
    'wood': 'fir',
    'color': 'black',
    'height_in': 36,
    'width_in': 48,
    'length_in': 68
}

print(desk['drawer_count'])

students = ['Kelly', 'Larry', 'José', 'Frank', 'Sarah', 'Sue']

for student in students:
    if student = 'Kelly':
        print('It's Kelly!')
    elif student == 'José':
        print("It's José!")

import cvs

with open('../../../data/eels.csv', 'r') as o:
    reader = csv.DictReader(o)
    for row in Reader:
        print(row)
```

### Further reading

- [Python's tutorial on errors and exceptions](https://docs.python.org/3/tutorial/errors.html)
- [Software Carpentry post on understanding Python errors](https://anenadic.github.io/2014-11-10-manchester/novice/python/07-errors.html)
- [How to read a traceback](http://cs.franklin.edu/~ansaria/traceback.html)
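As a closing, optional illustration that is not part of the original notebook: the same pieces of information a traceback shows you, the class of the error and its message, are also available programmatically via `try`/`except`, which is handy when you want a script to report a problem and keep going. The `years_of_service` idea is reused here purely as an example.

```
# Catching a runtime error and inspecting the same information a traceback shows.
person = {'name': 'John', 'years_of_service': '26'}

try:
    # Multiplying a string by a float raises a TypeError at runtime.
    bonus = person['years_of_service'] * 50.0
except Exception as err:
    print(type(err).__name__)  # the class of the error, e.g. TypeError
    print(err)                 # the error message itself
```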
``` import subprocess try: import dgl except: subprocess.check_call(["python", '-m', 'pip', 'install', 'dgl-cu110']) import dgl import os import dgl.data from dgl.data import DGLDataset import torch import torch.nn as nn import torch.nn.functional as F import pandas as pd import numpy as np import tqdm from sklearn.linear_model import LinearRegression from dgl.data.utils import save_graphs from dgl.data.utils import load_graphs from dgl.nn.pytorch.conv import ChebConv import copy import matplotlib.pyplot as plt os.chdir("/content/drive/MyDrive/Winter_Research") max_pixs = 128309 ``` ## Make the Dataset ``` CA_x, CA_y = [], [] KS_x, KS_y = [], [] MT_x, MT_y = [], [] TX_x, TX_y = [], [] OH_x, OH_y = [], [] states = {"CA" : [CA_x, CA_y, "Roi_1"], "KS" : [KS_x, KS_y, "Roi_2"], "MT" : [MT_x, MT_y, "Roi_3"], "TX" : [TX_x, TX_y, "Roi_4"], "OH" : [OH_x, OH_y, "Roi_5"]} ``` #### Load into RAM ``` master_df = pd.read_csv("Sentinel2_Traffic/Traffic_Data/5_state_traffic.csv") master_df = master_df.set_index("Unnamed: 0") CA_x, CA_y = [], [] KS_x, KS_y = [], [] MT_x, MT_y = [], [] TX_x, TX_y = [], [] OH_x, OH_y = [], [] states = {"Cali" : [CA_x, CA_y, "Roi_1"], "KS" : [KS_x, KS_y, "Roi_2"], "MT" : [MT_x, MT_y, "Roi_3"], "TX" : [TX_x, TX_y, "Roi_4"], "Ohio" : [OH_x, OH_y, "Roi_5"]} j = 0 for st in ["Cali", "KS", "MT", "TX", "Ohio"]: # for st in ["TX"]: # path_check = "R/" + states[st][2] + "/greedy_a/" path = "new_roi/" + st # + "/sent_cloud_90p_raw/" # imgs_check = os.listdir(path_check) imgs = os.listdir(path) # for img, img_check in zip(imgs, imgs_check): for img in imgs: date = img[len(st):len(st) + 10] # print(date) # break try: photo = pd.read_csv(path + '/' + img) except: continue # photo_check = np.loadtxt(path_check + img_check).reshape(-1, 7, 3) # cali_pixs = 72264 # # kansas_pixs = 69071 # # mont_pixs = 72099 # # texas_pixs = 71764 # ohio_pixs = 62827 if photo.shape[0] < 50000: continue if date in list(master_df.index): if st == "Cali": lookup_st = "CA" elif st == "Ohio": lookup_st = "OH" else: lookup_st = st if not pd.isna(master_df.loc[date][lookup_st]): states[st][0].append(photo) states[st][1].append(master_df.loc[date][lookup_st]) print(j, st, photo.shape) j += 1 def gen_around(x, y): return [(x, y), (x, y + 10), (x, y - 10), (x + 10, y), (x - 10, y), (x + 10, y + 10), (x + 10, y - 10), (x - 10, y + 10), (x - 10, y - 10)] def gen_around_strict(x, y): return [(x, y), (x, y + 10), (x, y - 10), (x + 10, y), (x - 10, y)] def neighbors(road, coords, x, y, diagonal=True): neigh = [] if diagonal: cand = gen_around(x, y) else: cand = gen_around_strict(x, y) for pix in cand: if pix[0] in coords: if pix[1] in coords[pix[0]]: neigh.append(coords[pix[0]][pix[1]]['idx']) return neigh def src_dst(road, coords, diagonal=True): src, dst, values = [], [] , [] for row in range(road.shape[0]): x = road["x"][row] y = road["y"][row] idx = coords[x][y]['idx'] val = coords[x][y]['val'] # if val[0] != road[row][:3][0]: # assert(False) for c in neighbors(road, coords, x, y, diagonal): src.append(idx) dst.append(c) values.append(val) return src, dst #, values device = torch.cuda.current_device() class RoadDataset(DGLDataset): def __init__(self, states): self.states = states super().__init__(name='road_graphs') def process(self): self.graphs = [] self.labels = [] self.state = [] for st in self.states.keys(): # for st in ["TX"]: print(st) for i in range(len(self.states[st][0])): print(i) img = states[st][0][i] coords = {} vals = [] print(img.shape[0]) for j in range(img.shape[0]): # print(img[j].shape) lon = 
img["x"][j].astype(int) # print(lon) lat = img["y"][j].astype(int) val = [img["B2"][j], img["B3"][j], img["B4"][j]] vals.append(val) if lon not in coords: coords[lon] = {} coords[lon][lat] = {'idx' : j, 'val' : val} src, dst = src_dst(img, coords) #src, dst, values = src_dst(img, coords) # print(np.mean(src), np.mean(dst), np.mean(values)) graph = dgl.graph((src, dst), num_nodes=img.shape[0]) graph.ndata['feat'] = torch.from_numpy(np.array(vals)) #graph = graph.add_self_loop(graph) graph = graph.to(device) self.graphs.append(graph) self.labels.append(self.states[st][1][i]) self.state.append(st) # assert(False) def __getitem__(self, i): return self.graphs[i], self.labels[i], self.state[i] def __len__(self): return len(self.graphs) class RoadDatasetLoad(DGLDataset): def __init__(self, states): self.states = states super().__init__(name='road_graphs') def process(self): self.graphs = load_graphs("graphs/data_new.bin")[0] self.labels = np.loadtxt("graphs/labels_new.csv") self.state = np.loadtxt("graphs/states_new.csv", dtype=np.str) def __getitem__(self, i): return self.graphs[i], self.labels[i]#, self.state[i] def __len__(self): return len(self.graphs) Road_Graphs = RoadDataset(states) dataset = Road_Graphs dataset[100] # Road_Graphs = RoadDataset(states) save_graphs('graphs/data_new.bin', dataset.graphs) labels = np.array(dataset.labels) states = np.array(dataset.state) np.savetxt("graphs/labels_new.csv", labels) np.savetxt('graphs/states_new.csv', states, fmt="%s") Road_Load = RoadDatasetLoad(states) dataset_save = dataset # Generate a synthetic dataset with 10000 graphs, ranging from 10 to 500 nodes. # dataset = dgl.data.GINDataset('PROTEINS', self_loop=True) dataset = Road_Load ``` ## Train the Model ``` # X = dataset[:][0] # y = dataset[:][1] print(dataset.state[0:37]) print(dataset.state[37:64]) print(dataset.state[64:88]) print(dataset.state[88:119]) print(dataset.state[119:124]) from dgl.dataloading import GraphDataLoader from torch.utils.data.sampler import SubsetRandomSampler from torch.utils.data import DataLoader state_val = False one_sample = False state = "TX" lookup_state = {"CA" : 0, "KS" : 1, "MT" : 2, "TX" : 3, "OH" : 4} state_idxs = [(0, 37), (37, 64), (64, 88), (88, 119), (119, 124)] num_examples = len(dataset) if state_val: x = torch.arange(num_examples) start = state_idxs[lookup_state[state]][0] end = state_idxs[lookup_state[state]][1] test_sample = x[start + 3: end] val_sample = x[start : start + 3] train_sample = torch.cat((x[:start], x[end:])) train_sample = train_sample[torch.randperm(train_sample.shape[0])] print(train_sample) else: num_train = int(num_examples * 0.7) num_val = int(num_examples * 0.85) x = torch.randperm(num_examples) train_sample = x[:num_train] val_sample = x[num_train: num_val] test_sample = x[num_val:] train_sampler = SubsetRandomSampler(train_sample) val_sampler = SubsetRandomSampler(val_sample) test_sampler = SubsetRandomSampler(test_sample) train_dataloader = GraphDataLoader( dataset, sampler=train_sampler, batch_size=16, drop_last=False) val_dataloader = GraphDataLoader( dataset, sampler=val_sampler, batch_size=16, drop_last=False) test_dataloader = GraphDataLoader( dataset, sampler=test_sampler, batch_size=16, drop_last=False) # print(train_sample, val_sample, test_sample) it = iter(test_dataloader) batch = next(it) print(batch) batched_graph, labels = batch print('Number of nodes for each graph element in the batch:', batched_graph.batch_num_nodes()) print('Number of edges for each graph element in the batch:', 
batched_graph.batch_num_edges()) # Recover the original graph elements from the minibatch graphs = dgl.unbatch(batched_graph) print('The original graphs in the minibatch:') print(graphs) print(labels) from dgl.nn import GraphConv, DenseGraphConv, GATConv class GCN(nn.Module): def __init__(self, in_feats, conv_hidden, lin_hidden): super(GCN, self).__init__() self.conv_layers = nn.ModuleList() self.LR = nn.LeakyReLU(0.2) self.lin_layers = nn.ModuleList() self.conv_layers.append(GraphConv(in_feats, conv_hidden[0])) for i in range(1, len(conv_hidden)): self.conv_layers.append(GraphConv(conv_hidden[i - 1], conv_hidden[i])) for i in range(1, len(lin_hidden) - 1): self.lin_layers.append(nn.Linear(lin_hidden[i - 1], lin_hidden[i])) #self.lin_layers.append(nn.BatchNorm1d(lin_hidden[i])) self.lin_layers.append(nn.Linear(lin_hidden[-2], lin_hidden[-1])) def forward(self, g, in_feat): output = in_feat for layer in self.conv_layers: output = self.LR(layer(g, output)) # print(torch.mean(output)) graphs = dgl.unbatch(g) flat_arr = torch.zeros((g.batch_size, max_pixs)) prev = 0 # print("Before", torch.mean(output)) for i in range(len(batched_graph.batch_num_nodes())): end = prev + int(batched_graph.batch_num_nodes()[i].item()) entry = output[prev: end] entry = entry / int(g.batch_num_nodes()[i].item()) pad_val = int(torch.mean(entry).item()) pad_length = (max_pixs - entry.shape[0]) // 2 entry = torch.nn.functional.pad(entry.flatten(), (pad_length, pad_length), value=pad_val) flat_arr[i][:entry.shape[0]] = entry prev = end flat_arr = flat_arr.to(device) #print("After", torch.mean(flat_arr)) output = flat_arr for i, layer in enumerate(self.lin_layers): output = layer(output) if i != (len(self.lin_layers) - 1): output = self.LR(output) #print(flat_arr.shape) # g.ndata['h'] = h # print(dgl.mean_nodes(g, 'h')) # assert(False) return output #dgl.mean_nodes(g, 'h') # # Create the model with given dimensions model = GCN(3, [10, 10, 1], [max_pixs,1000, 500, 100, 50, 10, 1]) # model = GCN(3, 16, 1) model.cuda() criterion = nn.MSELoss() #model.to('cuda:0') optimizer = torch.optim.Adam(model.parameters(), lr=0.01) del criterion del optimizer del model torch.cuda.empty_cache() def init_weights(m): if type(m) == nn.Linear: torch.nn.init.xavier_uniform(m.weight) m.bias.data.fill_(0.01) model.apply(init_weights) best_model = model min_val = 1e9 j = 0 for epoch in range(100): loss_tot = 0 loss = 0 batches = 0 model.train() for batched_graph, labels in train_dataloader: batched_graph = batched_graph.to(device) labels = labels.to(device) pred = model(batched_graph, batched_graph.ndata['feat'].float()) # print(pred, labels) labels = labels.to(device) loss = criterion(pred, labels.reshape(labels.shape[0], 1).float()) loss_tot += loss.item() batches += 1 optimizer.zero_grad() loss.backward() optimizer.step() if j % 10 == 0: print("Train Loss:", loss_tot / batches) num_tests = 0 loss_i = 0 with torch.no_grad(): model.eval() for batched_graph, labels in val_dataloader: batched_graph = batched_graph.to(device) labels = labels.to(device) pred = model(batched_graph, batched_graph.ndata['feat'].float()) loss_i += criterion(pred, labels.reshape(labels.shape[0], 1).float()).item() # x.extend([x[0] for x in pred.cpu().detach().numpy().tolist()]) # y.extend([x[0] for x in labels.reshape(labels.shape[0], 1).cpu().detach().numpy().tolist()]) # print(type(pred)) num_tests += 1 val_loss = loss_i / num_tests if j % 10 == 0: print('Val loss:', val_loss) # val_loss.append(loss_v.item()) if val_loss < min_val: print("new_best:", val_loss) 
min_val = val_loss best_model = copy.deepcopy(model) j += 1 # num_correct = 0 num_tests = 0 x = [] y = [] loss = 0 with torch.no_grad(): for batched_graph, labels in test_dataloader: # print(batched_graph) batched_graph = batched_graph.to(device) labels = labels.to(device) pred = best_model(batched_graph, batched_graph.ndata['feat'].float()) loss += criterion(pred, labels.reshape(labels.shape[0], 1).float()).item() x.extend([x[0] for x in pred.cpu().detach().numpy().tolist()]) y.extend([x[0] for x in labels.reshape(labels.shape[0], 1).cpu().detach().numpy().tolist()]) num_tests += 1 print('Test loss:', loss / num_tests) x_temp = y y_temp = x # print(y_temp) # for i in range(len(y_temp)): # if y_temp[i] < 600: # y_temp.pop(i) # x_temp.pop(i) # break x_plot = np.array(y_temp) y_plot = np.array(x_temp) new_x = np.array(x_plot).reshape(-1,1) new_y = np.array(y_plot) fit = LinearRegression().fit(new_x, new_y) score = fit.score(new_x, new_y) plt.xlabel("Prediction") plt.ylabel("Actual Traffic") print(score) plt.scatter(new_x, new_y) axes = plt.gca() x_vals = np.array(axes.get_xlim()) y_vals = x_vals plt.plot(x_vals, y_vals, '--') pre_y = fit.predict(new_x) # plt.plot plt.plot(new_x, pre_y) plt.plot(x_vals, y_vals, '--') # plt.savefig("GCN_MSE_143_r_881.png") plt.show() y labels class ChebNet(nn.Module): def __init__(self, k, in_feats, hiddens, out_feats): super(ChebNet, self).__init__() self.pool = nn.MaxPool1d(2) self.layers = nn.ModuleList() self.readout = MaxPooling() # Input layer self.layers.append( ChebConv(in_feats, hiddens[0], k)) for i in range(1, len(hiddens)): self.layers.append( ChebConv(hiddens[i - 1], hiddens[i], k)) self.cls = nn.Sequential( nn.Linear(hiddens[-1], out_feats), nn.LogSoftmax() ) def forward(self, g_arr, feat): for g, layer in zip(g_arr, self.layers): feat = self.pool(layer(g, feat, [2] * g.batch_size).transpose(-1, -2).unsqueeze(0))\ .squeeze(0).transpose(-1, -2) return self.cls(self.readout(g_arr[-1], feat)) ```
##### Copyright 2018 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #@title MIT License # # Copyright (c) 2017 François Chollet # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. ``` # Save and restore models <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/keras/save_and_restore_models"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/keras/save_and_restore_models.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/save_and_restore_models.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> Model progress can be saved during—and after—training. This means a model can resume where it left off and avoid long training times. Saving also means you can share your model and others can recreate your work. When publishing research models and techniques, most machine learning practitioners share: * code to create the model, and * the trained weights, or parameters, for the model Sharing this data helps others understand how the model works and try it themselves with new data. Caution: Be careful with untrusted code—TensorFlow models are code. See [Using TensorFlow Securely](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md) for details. ### Options There are different ways to save TensorFlow models—depending on the API you're using. This guide uses [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API to build and train models in TensorFlow. 
For other approaches, see the TensorFlow [Save and Restore](https://www.tensorflow.org/guide/saved_model) guide or [Saving in eager](https://www.tensorflow.org/guide/eager#object_based_saving). ## Setup ### Installs and imports Install and import TensorFlow and dependencies: ``` !pip install h5py pyyaml ``` ### Get an example dataset We'll use the [MNIST dataset](http://yann.lecun.com/exdb/mnist/) to train our model to demonstrate saving weights. To speed up these demonstration runs, only use the first 1000 examples: ``` from __future__ import absolute_import, division, print_function import os !pip install tf-nightly-2.0-preview import tensorflow as tf keras = tf.keras tf.__version__ (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data() train_labels = train_labels[:1000] test_labels = test_labels[:1000] train_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0 test_images = test_images[:1000].reshape(-1, 28 * 28) / 255.0 ``` ### Define a model Let's build a simple model we'll use to demonstrate saving and loading weights. ``` # Returns a short sequential model def create_model(): model = tf.keras.models.Sequential([ keras.layers.Dense(512, activation=tf.keras.activations.relu, input_shape=(784,)), keras.layers.Dropout(0.2), keras.layers.Dense(10, activation=tf.keras.activations.softmax) ]) model.compile(optimizer='adam', loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy']) return model # Create a basic model instance model = create_model() model.summary() ``` ## Save checkpoints during training The primary use case is to automatically save checkpoints *during* and at *the end* of training. This way you can use a trained model without having to retrain it, or pick-up training where you left of—in case the training process was interrupted. `tf.keras.callbacks.ModelCheckpoint` is a callback that performs this task. The callback takes a couple of arguments to configure checkpointing. ### Checkpoint callback usage Train the model and pass it the `ModelCheckpoint` callback: ``` checkpoint_path = "training_1/cp.ckpt" checkpoint_dir = os.path.dirname(checkpoint_path) # Create checkpoint callback cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1) model = create_model() model.fit(train_images, train_labels, epochs = 10, validation_data = (test_images,test_labels), callbacks = [cp_callback]) # pass callback to training ``` This creates a single collection of TensorFlow checkpoint files that are updated at the end of each epoch: ``` !ls {checkpoint_dir} ``` Create a new, untrained model. When restoring a model from only weights, you must have a model with the same architecture as the original model. Since it's the same model architecture, we can share weights despite that it's a different *instance* of the model. Now rebuild a fresh, untrained model, and evaluate it on the test set. An untrained model will perform at chance levels (~10% accuracy): ``` model = create_model() loss, acc = model.evaluate(test_images, test_labels) print("Untrained model, accuracy: {:5.2f}%".format(100*acc)) ``` Then load the weights from the checkpoint, and re-evaluate: ``` model.load_weights(checkpoint_path) loss,acc = model.evaluate(test_images, test_labels) print("Restored model, accuracy: {:5.2f}%".format(100*acc)) ``` ### Checkpoint callback options The callback provides several options to give the resulting checkpoints unique names, and adjust the checkpointing frequency. 
Train a new model, and save uniquely named checkpoints once every 5-epochs: ``` # include the epoch in the file name. (uses `str.format`) checkpoint_path = "training_2/cp-{epoch:04d}.ckpt" checkpoint_dir = os.path.dirname(checkpoint_path) cp_callback = tf.keras.callbacks.ModelCheckpoint( checkpoint_path, verbose=1, save_weights_only=True, # Save weights, every 5-epochs. period=5) model = create_model() model.fit(train_images, train_labels, epochs = 50, callbacks = [cp_callback], validation_data = (test_images,test_labels), verbose=0) ``` Now, look at the resulting checkpoints and choose the latest one: ``` ! ls {checkpoint_dir} latest = tf.train.latest_checkpoint(checkpoint_dir) latest ``` Note: the default tensorflow format only saves the 5 most recent checkpoints. To test, reset the model and load the latest checkpoint: ``` model = create_model() model.load_weights(latest) loss, acc = model.evaluate(test_images, test_labels) print("Restored model, accuracy: {:5.2f}%".format(100*acc)) ``` ## What are these files? The above code stores the weights to a collection of [checkpoint](https://www.tensorflow.org/guide/saved_model#save_and_restore_variables)-formatted files that contain only the trained weights in a binary format. Checkpoints contain: * One or more shards that contain your model's weights. * An index file that indicates which weights are stored in a which shard. If you are only training a model on a single machine, you'll have one shard with the suffix: `.data-00000-of-00001` ## Manually save weights Above you saw how to load the weights into a model. Manually saving the weights is just as simple, use the `Model.save_weights` method. ``` # Save the weights model.save_weights('./checkpoints/my_checkpoint') # Restore the weights model = create_model() model.load_weights('./checkpoints/my_checkpoint') loss,acc = model.evaluate(test_images, test_labels) print("Restored model, accuracy: {:5.2f}%".format(100*acc)) ``` ## Save the entire model The entire model can be saved to a file that contains the weight values, the model's configuration, and even the optimizer's configuration (depends on set up). This allows you to checkpoint a model and resume training later—from the exact same state—without access to the original code. Saving a fully-functional model is very useful—you can load them in TensorFlow.js ([HDF5](https://js.tensorflow.org/tutorials/import-keras.html), [Saved Model](https://js.tensorflow.org/tutorials/import-saved-model.html)) and then train and run them in web browsers, or convert them to run on mobile devices using TensorFlow Lite ([HDF5](https://www.tensorflow.org/lite/convert/python_api#exporting_a_tfkeras_file_), [Saved Model](https://www.tensorflow.org/lite/convert/python_api#exporting_a_savedmodel_)) ### As an HDF5 file Keras provides a basic save format using the [HDF5](https://en.wikipedia.org/wiki/Hierarchical_Data_Format) standard. For our purposes, the saved model can be treated as a single binary blob. ``` model = create_model() # You need to use a keras.optimizer to restore the optimizer state from an HDF5 file. model.compile(optimizer='adam', loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy']) model.fit(train_images, train_labels, epochs=5) # Save entire model to a HDF5 file model.save('my_model.h5') ``` Now recreate the model from that file: ``` # Recreate the exact same model, including weights and optimizer. 
new_model = keras.models.load_model('my_model.h5') new_model.summary() ``` Check its accuracy: ``` loss, acc = new_model.evaluate(test_images, test_labels) print("Restored model, accuracy: {:5.2f}%".format(100*acc)) ``` This technique saves everything: * The weight values * The model's configuration (architecture) * The optimizer configuration Keras saves models by inspecting the architecture. Currently, it is not able to save TensorFlow optimizers (from `tf.train`). When using those you will need to re-compile the model after loading, and you will lose the state of the optimizer. ## What's Next That was a quick guide to saving and loading with `tf.keras`. * The [tf.keras guide](https://www.tensorflow.org/guide/keras) shows more about saving and loading models with `tf.keras`. * See [Saving in eager](https://www.tensorflow.org/guide/eager#object_based_saving) for saving during eager execution. * The [Save and Restore](https://www.tensorflow.org/guide/saved_model) guide has low-level details about TensorFlow saving.
github_jupyter
<h1 style="text-align:center;text-decoration: underline">Stream Analytics Tutorial</h1> <h1>Overview</h1> <p>Welcome to the stream analytics tutorial for EpiData. In this tutorial we will perform near real-time stream analytics on sample weather data acquired from a simulated wireless sensor network.</p> <h2>Package and Module Imports</h2> <p>As a first step, we will import packages and modules required for this tutorial. Since <i>EpiData Context (ec)</i> is required to use the application, it is implicitly imported. Sample functions for near real-time analytics are avaialable in <i>EpiData Analytics</i> package. Other packages and modules, such as <i>datetime</i>, <i>pandas</i> and <i>matplotlib</i>, can also be imported at this time.</p> ``` #from epidata.context import ec from epidata.analytics import * %matplotlib inline from datetime import datetime, timedelta import pandas as pd import time import pylab as pl from IPython import display import json ``` <h2>Stream Analysis</h2> <h3>Function Definition</h3> <p>EpiData supports development and deployment of custom algorithms via Jupyter Notebook. Below, we define python functions for substituting extreme outliers and aggregating temperature measurements. These functions can be operated on near real-time and historic data. In this tutorial, we will apply the functions on near real-time data available from Kafka 'measurements' and 'measurements_cleansed' topics</p> ``` import pandas as pd import math, numbers def substitute_demo(df, meas_names, method="rolling", size=3): """ Substitute missing measurement values within a data frame, using the specified method. """ df["meas_value"].replace(250, np.nan, inplace=True) for meas_name in meas_names: if (method == "rolling"): if ((size % 2 == 0) and (size != 0)): size += 1 if df.loc[df["meas_name"]==meas_name].size > 0: indices = df.loc[df["meas_name"] == meas_name].index[df.loc[df["meas_name"] == meas_name]["meas_value"].apply( lambda x: not isinstance(x, basestring) and (x == None or np.isnan(x)))] substitutes = df.loc[df["meas_name"]==meas_name]["meas_value"].rolling( window=size, min_periods=1, center=True).mean() df["meas_value"].fillna(substitutes, inplace=True) df.loc[indices, "meas_flag"] = "substituted" df.loc[indices, "meas_method"] = "rolling average" else: raise ValueError("Unsupported substitution method: ", repr(method)) return df import pandas as pd import numpy as np import json def subgroup_statistics(row): row['start_time'] = np.min(row["ts"]) row["stop_time"] = np.max(row["ts"]) row["meas_summary_name"] = "statistics" row["meas_summary_value"] = json.dumps({'count': row["meas_value"].count(), 'mean': row["meas_value"].mean(), 'std': row["meas_value"].std(), 'min': row["meas_value"].min(), 'max': row["meas_value"].max()}) row["meas_summary_description"] = "descriptive statistics" return row def meas_statistics_demo(df, meas_names, method="standard"): """ Compute statistics on measurement values within a data frame, using the specified method. 
""" if (method == "standard"): df_grouped = df.loc[df["meas_name"].isin(meas_names)].groupby(["company", "site", "station", "sensor"], as_index=False) df_summary = df_grouped.apply(subgroup_statistics).loc[:, ["company", "site", "station", "sensor", "start_time", "stop_time", "event", "meas_name", "meas_summary_name", "meas_summary_value", "meas_summary_description"]].drop_duplicates() else: raise ValueError("Unsupported summary method: ", repr(method)) return df_summary ``` <h3>Transformations and Streams</h3> <p>The analytics algorithms are executed on near real-time data through transformations. A transformation specifies the function, its parameters and destination. The destination can be one of the database tables, namely <i>'measurements_cleansed'</i> or <i>'measurements_summary'</i>, or another Kafka topic.</p> <p>Once the transformations are defined, they are initiated via <i>ec.create_stream(transformations, data_source, batch_duration)</i> function call.</p> ``` #Stop current near real-time processing ec.stop_streaming() # Define tranformations and steam operations op1 = ec.create_transformation(substitute_demo, [["Temperature", "Wind_Speed"], "rolling", 3], "measurements_substituted") ec.create_stream([op1], "measurements") op2 = ec.create_transformation(identity, [], "measurements_cleansed") op3 = ec.create_transformation(meas_statistics, [["Temperature", "Wind_Speed"], "standard"], "measurements_summary") ec.create_stream([op2, op3],"measurements_substituted") # Start near real-time processing ec.start_streaming() ``` <h3>Data Ingestion</h3> <p>We can now start data ingestion from simulated wireless sensor network. To do so, you can download and run the <i>sensor_data_with_outliers.py</i> example shown in the image below.</p> <img src="./static/jupyter_tree.png"> <h3>Data Query and Visualization</h3> <p>We query the original and processed data from Kafka queue using Kafka Consumer. 
The data obtained from the quey is visualized using Bokeh charts.</p> ``` from bokeh.io import push_notebook, show, output_notebook from bokeh.layouts import row, column from bokeh.plotting import figure from bokeh.models import ColumnDataSource from kafka import KafkaConsumer import json from pandas.io.json import json_normalize output_notebook() plot1 = figure(plot_width=750, plot_height=200, x_axis_type='datetime', y_range=(30, 300)) plot2 = figure(plot_width=750, plot_height=200, x_axis_type='datetime', y_range=(30, 300)) df_kafka_init = pd.DataFrame(columns = ["ts", "meas_value"]) test_data_1 = ColumnDataSource(data=df_kafka_init.to_dict(orient='list')) test_data_2 = ColumnDataSource(data=df_kafka_init.to_dict(orient='list')) meas_name = "Temperature" plot1.circle("ts", "meas_value", source=test_data_1, legend=meas_name, line_color='orangered', line_width=1.5) line1 = plot1.line("ts", "meas_value", source=test_data_1, legend=meas_name, line_color='orangered', line_width=1.5) plot1.legend.location = "top_right" plot2.circle("ts", "meas_value", source=test_data_2, legend=meas_name, line_color='blue', line_width=1.5) line2 = plot2.line("ts", "meas_value", source=test_data_2, legend=meas_name, line_color='blue', line_width=1.5) plot2.legend.location = "top_right" consumer = KafkaConsumer() consumer.subscribe(['measurements', 'measurements_substituted']) delay = .1 handle = show(column(plot1, plot2), notebook_handle=True) for message in consumer: topic = message.topic measurements = json.loads(message.value) df_kafka = json_normalize(measurements) df_kafka["meas_value"] = np.nan if "meas_value" not in measurements else measurements["meas_value"] df_kafka = df_kafka.loc[df_kafka["meas_name"]==meas_name] df_kafka = df_kafka[["ts", "meas_value"]] df_kafka["ts"] = df_kafka["ts"].apply(lambda x: pd.to_datetime(x, unit='ms').tz_localize('UTC').tz_convert('US/Pacific')) if (not df_kafka.empty): if (topic == 'measurements'): test_data_1.stream(df_kafka.to_dict(orient='list')) if (topic == 'measurements_substituted'): test_data_2.stream(df_kafka.to_dict(orient='list')) push_notebook(handle=handle) time.sleep(delay) ``` <p>Another way to query and visualize processed data is using <i>ec.query_measurements_cleansed(..) and ec.query_measurements_summary(..)</i> functions. 
For our example, we specify parameters that match the sample data set, and query the aggregated values using the <i>ec.query_measurements_summary(..)</i> function call.</p> ``` # QUERY MEASUREMENTS_CLEANSED TABLE primary_key={"company": "EpiData", "site": "San_Jose", "station":"WSN-1", "sensor": ["Temperature_Probe", "RH_Probe", "Anemometer"]} start_time = datetime.strptime('8/19/2017 00:00:00', '%m/%d/%Y %H:%M:%S') stop_time = datetime.strptime('8/20/2017 00:00:00', '%m/%d/%Y %H:%M:%S') df_cleansed = ec.query_measurements_cleansed(primary_key, start_time, stop_time) print "Number of records:", df_cleansed.count() df_cleansed_local = df_cleansed.toPandas() df_cleansed_local[df_cleansed_local["meas_name"]=="Temperature"].tail(10).sort_values(by="ts",ascending=False) # QUERY MEASUREMENTS_SUMMARY TABLE primary_key={"company": "EpiData", "site": "San_Jose", "station":"WSN-1", "sensor": ["Temperature_Probe"]} start_time = datetime.strptime('8/19/2017 00:00:00', '%m/%d/%Y %H:%M:%S') stop_time = datetime.strptime('8/20/2017 00:00:00', '%m/%d/%Y %H:%M:%S') last_index = -1 summary_result = pd.DataFrame() df_summary = ec.query_measurements_summary(primary_key, start_time, stop_time) df_summary_local = df_summary.toPandas() summary_keys = df_summary_local[["company", "site", "station", "sensor", "start_time", "stop_time", "meas_name", "meas_summary_name"]] summary_result = df_summary_local["meas_summary_value"].apply(json.loads).apply(pd.Series) summary_combined = pd.concat([summary_keys, summary_result], axis=1) summary_combined.tail(5) ``` <h3>Stop Stream Analytics</h3> <p>The transformations can be stopped at any time via the <i>ec.stop_streaming()</i> function call.</p> <h2>Next Steps</h2> <p>Congratulations, you have successfully performed near real-time analytics on sample data acquired by a simulated wireless sensor network. The next step is to explore various capabilities of EpiData by creating your own custom analytics application!</p>
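<p>As a concrete starting point for your own application, the sketch below follows the same pattern as the <i>substitute_demo</i> function defined earlier: a plain pandas function operating on the same data-frame columns. The function name, thresholds and the commented-out wiring are illustrative assumptions, not part of the EpiData package.</p> 

```
import pandas as pd

def range_check_demo(df, meas_names, low=-40.0, high=85.0):
    """
    Illustrative custom transformation (not part of EpiData): flag measurement
    values outside [low, high], using the same columns as substitute_demo above.
    """
    for meas_name in meas_names:
        vals = pd.to_numeric(df["meas_value"], errors="coerce")
        mask = (df["meas_name"] == meas_name) & ((vals < low) | (vals > high))
        df.loc[mask, "meas_flag"] = "out_of_range"
        df.loc[mask, "meas_method"] = "range check"
    return df

# Hypothetical wiring, following the ec.create_transformation(...) calls shown earlier:
# op = ec.create_transformation(range_check_demo, [["Temperature"], -40.0, 85.0],
#                               "measurements_cleansed")
# ec.create_stream([op], "measurements")
```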
github_jupyter
``` import geemap geemap.show_youtube('OwjSJnGWKJs') ``` ## Update the geemap package If you run into errors with this notebook, please uncomment the line below to update the [geemap](https://github.com/giswqs/geemap#installation) package to the latest version from GitHub. Restart the Kernel (Menu -> Kernel -> Restart) to take effect. ``` # geemap.update_package() ``` ## Create an interactive map ### Use the Drawing tool to draw a rectangle on the map ``` Map = geemap.Map() Map ``` ## Generate a Landsat timelapse animation ``` import os out_dir = os.path.join(os.path.expanduser("~"), 'Downloads') if not os.path.exists(out_dir): os.makedirs(out_dir) label = 'Urban Growth in Las Vegas' Map.add_landsat_ts_gif(label=label, start_year=1985, bands=['Red', 'Green', 'Blue'], font_color='white', frames_per_second=10, progress_bar_color='blue') ``` ## Create Landsat timeseries ``` import os import ee import geemap Map = geemap.Map() Map ``` You and define an roi or draw a rectangle on the map ``` roi = ee.Geometry.Polygon( [[[-115.471773, 35.892718], [-115.471773, 36.409454], [-114.271283, 36.409454], [-114.271283, 35.892718], [-115.471773, 35.892718]]], None, False) # roi = Map.draw_last_feature collection = geemap.landsat_timeseries(roi=roi, start_year=1985, end_year=2019, start_date='06-10', end_date='09-20') print(collection.size().getInfo()) first_image = collection.first() vis = { 'bands': ['NIR', 'Red', 'Green'], 'min': 0, 'max': 4000, 'gamma': [1, 1, 1] } Map.addLayer(first_image, vis, 'First image') ``` ## Download ImageCollection as a GIF ``` # Define arguments for animation function parameters. video_args = { 'dimensions': 768, 'region': roi, 'framesPerSecond': 10, 'bands': ['NIR', 'Red', 'Green'], 'min': 0, 'max': 4000, 'gamma': [1, 1, 1] } work_dir = os.path.join(os.path.expanduser("~"), 'Downloads') if not os.path.exists(work_dir): os.makedirs(work_dir) out_gif = os.path.join(work_dir, "landsat_ts.gif") geemap.download_ee_video(collection, video_args, out_gif) ``` ## Add animated text to GIF ``` geemap.show_image(out_gif) texted_gif = os.path.join(work_dir, "landsat_ts_text.gif") geemap.add_text_to_gif(out_gif, texted_gif, xy=('3%', '5%'), text_sequence=1985, font_size=30, font_color='#ffffff', add_progress_bar=False) label = 'Urban Growth in Las Vegas' geemap.add_text_to_gif(texted_gif, texted_gif, xy=('2%', '88%'), text_sequence=label, font_size=30, font_color='#ffffff', progress_bar_color='cyan') geemap.show_image(texted_gif) ```
github_jupyter
``` # Copyright © 2020, Johan Vonk # SPDX-License-Identifier: MIT %matplotlib inline import numpy as np import pandas as pd import math import matplotlib.pyplot as plt from sklearn.manifold import MDS from sklearn.metrics import pairwise_distances import paho.mqtt.client as mqtt from threading import Timer import json from config import username, password import seaborn as sns measured=np.array([ [0, 37.9, 92.2, 95.2, 56.6, 95.5, 73.5, 56.7, 121.2, 73.9], [0, 0, 54.7, 71.8, 44.4, 59.4, 41.6, 21.9, 89.5, 46.8], [0, 0, 0, 60.3, 67.6, 27.3, 45.8, 42.3, 65.1, 43.5], [0, 0, 0, 0, 40.4, 87.1, 94.8, 78.9, 125.4, 25.4], [0, 0, 0, 0, 0, 86.9, 81.3, 61.5, 123.0, 28.0], [0, 0, 0, 0, 0, 0, 29.1, 39.1, 28.3, 67.2], [0, 0, 0, 0, 0, 0, 0, 20.6, 48.6, 70.0], [0, 0, 0, 0, 0, 0, 0, 0, 67.6, 53.5], [0, 0, 0, 0, 0, 0, 0, 0, 0, 105.5], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ]) measured*=0.0254 measured+=measured.T model = MDS(n_components=2, metric=True, dissimilarity='precomputed', random_state=1, n_init=1000, max_iter=1000) positions = model.fit_transform(measured) positions -= positions[8] positions[:, 1]*=-1 theta=np.radians(221)+math.atan2(positions[5,1],positions[5,0]) positions=positions.dot([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) positions[:,0]-=positions[3,0] angles=np.radians([18,9,-18,135,156,-59,-23,77,-90,62]) plt.quiver(positions[:,0], positions[:,1], np.cos(angles), np.sin(angles)) devices=pd.DataFrame(columns=("name", "address", "version", "date")) df=pd.DataFrame(columns=("TIMESTAMP","SCANNER","ADVERTISER","TX POWER","RSSI","DISTANCE","ANGLE")) class RepeatTimer(Timer): def run(self): while not self.finished.wait(self.interval): self.function(*self.args, **self.kwargs) def switch_devices(client, devices): for device,payload in zip(devices["name"],np.random.choice(['scan', 'adv'],len(devices))): client.publish("blescan/ctrl/"+device, payload=payload) def on_connect(client, userdata, flags, rc): client.subscribe("blescan/data/#") client.publish("blescan/ctrl", payload="who") client.publish("blescan/ctrl", payload="int 2") def on_message(client, userdata, msg): source=msg.topic.rsplit('/', 1)[-1] data = json.loads(msg.payload.decode('ASCII').replace('""','"')) if "name" in data and data["name"] not in devices["name"].values: devices.loc[len(devices)]=[data["name"],data["address"],data["version"],data["date"]] elif "RSSI" in data and data["address"] in devices["address"].values and source in devices["name"].values: sc_pos=positions[int(source.replace("esp32-",""))-1] advertiser=devices[devices['address']==data['address']]['name'].values[0] ad_pos=positions[int(advertiser.replace("esp32-",""))-1] dx=sc_pos[0]-ad_pos[0] dy=sc_pos[1]-ad_pos[1] df.loc[len(df)]=[pd.Timestamp.now(),source,advertiser,data["txPwr"],data["RSSI"],math.sqrt(dx**2+dy**2),(math.atan2(dy,dx)-angles[int(advertiser.replace("esp32-",""))-1]+2*np.pi)%(2*np.pi)] client=mqtt.Client("reader") client.on_connect = on_connect client.on_message = on_message client.connect('mqtt.vonk', 1883) client.username_pw_set(username=username,password=password) timer = RepeatTimer(60, switch_devices, args=(client,devices)) try: client.loop_start() timer.start() except KeyboardInterrupt: client.loop_stop() timer.cancel() d=df.copy() d['TIMESTAMP']=pd.to_datetime(d['TIMESTAMP'],errors='coerce') d['SCANNER']=d['SCANNER'].astype(str) d['ADVERTISER']=d['ADVERTISER'].astype(str) d['TX POWER']=pd.to_numeric(d['TX POWER'],errors='coerce').astype('int8') d['RSSI']=pd.to_numeric(d['RSSI'],errors='coerce').astype('int8') 
d['DISTANCE']=pd.to_numeric(d['DISTANCE'],errors='coerce') d['ANGLE']=pd.to_numeric(d['ANGLE'],errors='coerce') angle_shift=(1-np.cos(2*d['ANGLE']))/d['ANGLE']*3-0.855 d['HUMAN PREDICTION']=10**((11.5511+d['TX POWER']-d['RSSI']-angle_shift)/10/2) d['HUMAN PREDICTION']=pd.to_numeric(d['HUMAN PREDICTION'],errors='coerce') d['HUMAN SLE']=np.log((d['DISTANCE']+1)/(d['HUMAN PREDICTION']+1))**2 d['HUMAN SLE']=pd.to_numeric(d['HUMAN SLE'],errors='coerce') print('Received {0:.5} messages per second.'.format(len(df)/(df.iloc[-1]["TIMESTAMP"]-df.iloc[0]["TIMESTAMP"]).total_seconds())) print("Human distance and angle mean squared log error is {0:.5}.".format(np.sum(d['HUMAN SLE'])/len(d))) plot_data=d.query('`HUMAN PREDICTION`>0 and `HUMAN PREDICTION`<4') sns.jointplot(x="DISTANCE", y="HUMAN PREDICTION", data=plot_data, kind="hex") d['DISTANCE PREDICTION']=10**((11.5511+d['TX POWER']-d['RSSI'])/10/2) d['DISTANCE PREDICTION']=pd.to_numeric(d['DISTANCE PREDICTION'],errors='coerce') d['DISTANCE SLE']=np.log((d['DISTANCE']+1)/(d['DISTANCE PREDICTION']+1))**2 d['DISTANCE SLE']=pd.to_numeric(d['DISTANCE SLE'],errors='coerce') print("Distance-only mean squared log error is {0:.5}.".format(np.sum(d['DISTANCE SLE'])/len(d))) plot_data=d.query('`DISTANCE PREDICTION`>0 and `DISTANCE PREDICTION`<4') sns.jointplot(x="DISTANCE", y="DISTANCE PREDICTION", data=plot_data, kind="hex") import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler power=10**((11.5511+d['TX POWER']-d['RSSI'])/20) cos_2angle=np.cos(2*d['ANGLE']) sin_2angle=np.sin(2*d['ANGLE']) cos_angle=np.cos(d['ANGLE']) sin_angle=np.sin(d['ANGLE']) X = pd.DataFrame([power,cos_2angle,sin_2angle,cos_angle,sin_angle]).T y = np.ravel(d['DISTANCE']) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42) scaler = StandardScaler().fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) model = Sequential() model.add(Dense(8, kernel_initializer='normal', activation='relu', input_shape=(5,))) model.add(Dense(8, kernel_initializer='normal', activation='relu')) model.add(Dense(1, kernel_initializer='normal')) model.compile(loss='mean_squared_logarithmic_error', optimizer='sgd', metrics=['mse']) model.summary() history = model.fit(X_train, y_train, epochs=36, batch_size=32, verbose=1, validation_data=(X_test, y_test)) model.save('model') X_predict=scaler.transform(X) d['PREDICTION']=model.predict(X_predict, verbose=1) d['SLE']=np.log((d['DISTANCE']+1)/(d['PREDICTION']+1))**2 d.to_csv(f"pact_{d.iloc[0]['TIMESTAMP']:%Y%m%dT%H%M%S}.csv") plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss (msle)') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') print("ML model mean squared log error is {0:.5}.".format(np.sum(d['SLE'])/len(d))) print("False positive rate is {0:.3%}.".format(len(d.query('DISTANCE>0.9144 and PREDICTION<=0.9144'))/len(d))) print("False negative rate is {0:.3%}.".format(len(d.query('DISTANCE<=0.9144 and PREDICTION>0.9144'))/len(d))) print("True positive rate is {0:.3%}.".format(len(d.query('DISTANCE<=0.9144 and PREDICTION<=0.9144'))/len(d))) print("True negative rate is {0:.3%}.".format(len(d.query('DISTANCE>0.9144 and PREDICTION>0.9144'))/len(d))) plot_data=d.query('`PREDICTION`>0 and `PREDICTION`<4') sns.jointplot(x="DISTANCE", y="PREDICTION", data=plot_data, 
kind="hex") yard_power=0.9144 n_points=1000 angles=2*np.pi/n_points*np.arange(0, n_points) X_angles = scaler.transform(pd.DataFrame([np.full(len(angles),yard_power),np.cos(2*angles),np.sin(2*angles),np.cos(angles),np.sin(angles)]).T) result_angles=np.log10(model.predict(X_angles, verbose=1).flatten())*20 result_angles-=result_angles.max()-30 import plotly.express as px px.line_polar(r=result_angles, theta=angles*180/np.pi, line_close=True) #df[df['TIMESTAMP'] <= df['TIMESTAMP'].iloc[0]+pd.Timedelta(2,'D')] graph=df.sample(n=100) px.line(graph['TIMESTAMP'],graph['RSSI']) df ```
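For reference, the `10**((11.5511 + TX POWER - RSSI)/10/2)` conversion used in the cells above is the inverse of a log-distance path-loss model with path-loss exponent $n = 2$ and a fitted calibration constant $C \approx 11.55$ dB:

$$\mathrm{RSSI} = P_{tx} + C - 10\,n\,\log_{10}(d) \quad\Longrightarrow\quad d = 10^{\frac{P_{tx} + C - \mathrm{RSSI}}{10\,n}}$$

The `HUMAN PREDICTION` column applies the same inversion but first subtracts the angle-dependent `angle_shift` term from the numerator; that is the only difference between the two baselines compared against the ML model.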
github_jupyter
##### Copyright 2019 The TensorFlow Hub Authors. Licensed under the Apache License, Version 2.0 (the "License"); ``` # Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== ``` # Multilingual Universal Sentence Encoder Q&A Retrieval <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/hub/tutorials/retrieval_with_tf_hub_universal_encoder_qa"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/retrieval_with_tf_hub_universal_encoder_qa.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/hub/blob/master/examples/colab/retrieval_with_tf_hub_universal_encoder_qa.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/hub/examples/colab/retrieval_with_tf_hub_universal_encoder_qa.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> This is a demo for using [Univeral Encoder Multilingual Q&A model](https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/3) for question-answer retrieval of text, illustrating the use of **question_encoder** and **response_encoder** of the model. We use sentences from [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) paragraphs as the demo dataset, each sentence and its context (the text surrounding the sentence) is encoded into high dimension embeddings with the **response_encoder**. These embeddings are stored in an index built using the [simpleneighbors](https://pypi.org/project/simpleneighbors/) library for question-answer retrieval. On retrieval a random question is selected from the [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) dataset and encoded into high dimension embedding with the **question_encoder** and query the simpleneighbors index returning a list of approximate nearest neighbors in semantic space. ## Setup ``` %%capture #@title Setup Environment # Install the latest Tensorflow version. 
!pip install -q tensorflow_text !pip install -q simpleneighbors[annoy] !pip install -q nltk !pip install -q tqdm #@title Setup common imports and functions import json import nltk import os import pprint import random import simpleneighbors import urllib from IPython.display import HTML, display from tqdm.notebook import tqdm import tensorflow.compat.v2 as tf import tensorflow_hub as hub from tensorflow_text import SentencepieceTokenizer nltk.download('punkt') def download_squad(url): return json.load(urllib.request.urlopen(url)) def extract_sentences_from_squad_json(squad): all_sentences = [] for data in squad['data']: for paragraph in data['paragraphs']: sentences = nltk.tokenize.sent_tokenize(paragraph['context']) all_sentences.extend(zip(sentences, [paragraph['context']] * len(sentences))) return list(set(all_sentences)) # remove duplicates def extract_questions_from_squad_json(squad): questions = [] for data in squad['data']: for paragraph in data['paragraphs']: for qas in paragraph['qas']: if qas['answers']: questions.append((qas['question'], qas['answers'][0]['text'])) return list(set(questions)) def output_with_highlight(text, highlight): output = "<li> " i = text.find(highlight) while True: if i == -1: output += text break output += text[0:i] output += '<b>'+text[i:i+len(highlight)]+'</b>' text = text[i+len(highlight):] i = text.find(highlight) return output + "</li>\n" def display_nearest_neighbors(query_text, answer_text=None): query_embedding = model.signatures['question_encoder'](tf.constant([query_text]))['outputs'][0] search_results = index.nearest(query_embedding, n=num_results) if answer_text: result_md = ''' <p>Random Question from SQuAD:</p> <p>&nbsp;&nbsp;<b>%s</b></p> <p>Answer:</p> <p>&nbsp;&nbsp;<b>%s</b></p> ''' % (query_text , answer_text) else: result_md = ''' <p>Question:</p> <p>&nbsp;&nbsp;<b>%s</b></p> ''' % query_text result_md += ''' <p>Retrieved sentences : <ol> ''' if answer_text: for s in search_results: result_md += output_with_highlight(s, answer_text) else: for s in search_results: result_md += '<li>' + s + '</li>\n' result_md += "</ol>" display(HTML(result_md)) ``` Run the following code block to download and extract the SQuAD dataset into: * **sentences** is a list of (text, context) tuples - each paragraph from the SQuAD dataset are splitted into sentences using nltk library and the sentence and paragraph text forms the (text, context) tuple. * **questions** is a list of (question, answer) tuples. Note: You can use this demo to index the SQuAD train dataset or the smaller dev dataset (1.1 or 2.0) by selecting the **squad_url** below. 
``` #@title Download and extract SQuAD data squad_url = 'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json' #@param ["https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json", "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json", "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json", "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json"] squad_json = download_squad(squad_url) sentences = extract_sentences_from_squad_json(squad_json) questions = extract_questions_from_squad_json(squad_json) print("%s sentences, %s questions extracted from SQuAD %s" % (len(sentences), len(questions), squad_url)) print("\nExample sentence and context:\n") sentence = random.choice(sentences) print("sentence:\n") pprint.pprint(sentence[0]) print("\ncontext:\n") pprint.pprint(sentence[1]) print() ``` The following code block setup the tensorflow graph **g** and **session** with the [Univeral Encoder Multilingual Q&A model](https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/3)'s **question_encoder** and **response_encoder** signatures. ``` #@title Load model from tensorflow hub module_url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/3" #@param ["https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/3", "https://tfhub.dev/google/universal-sentence-encoder-qa/3"] model = hub.load(module_url) ``` The following code block compute the embeddings for all the text, context tuples and store them in a [simpleneighbors](https://pypi.org/project/simpleneighbors/) index using the **response_encoder**. ``` #@title Compute embeddings and build simpleneighbors index batch_size = 100 encodings = model.signatures['response_encoder']( input=tf.constant([sentences[0][0]]), context=tf.constant([sentences[0][1]])) index = simpleneighbors.SimpleNeighbors( len(encodings['outputs'][0]), metric='angular') print('Computing embeddings for %s sentences' % len(sentences)) slices = zip(*(iter(sentences),) * batch_size) num_batches = int(len(sentences) / batch_size) for s in tqdm(slices, total=num_batches): response_batch = list([r for r, c in s]) context_batch = list([c for r, c in s]) encodings = model.signatures['response_encoder']( input=tf.constant(response_batch), context=tf.constant(context_batch) ) for batch_index, batch in enumerate(response_batch): index.add_one(batch, encodings['outputs'][batch_index]) index.build() print('simpleneighbors index for %s sentences built.' % len(sentences)) ``` On retrieval, the question is encoded using the **question_encoder** and the question embedding is used to query the simpleneighbors index. ``` #@title Retrieve nearest neighbors for a random question from SQuAD num_results = 25 #@param {type:"slider", min:5, max:40, step:1} query = random.choice(questions) display_nearest_neighbors(query[0], query[1]) ```
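You can also query the index with your own question instead of a random SQuAD one. The short sketch below reuses the `display_nearest_neighbors` helper defined above and assumes the previous cells (model, index, `num_results`) have been run; the question text is just an example.

```
#@title Ask your own question (illustrative example)
my_question = "Who discovered penicillin?"  # any free-form question text
display_nearest_neighbors(my_question)
```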
github_jupyter
**Chapter 16 – Reinforcement Learning** This notebook contains all the sample code and solutions to the exercices in chapter 16. # Setup First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures: ``` # To support both python 2 and python 3 from __future__ import division, print_function, unicode_literals # Common imports import numpy as np import numpy.random as rnd import os import sys # to make this notebook's output stable across runs rnd.seed(42) # To plot pretty figures and animations %matplotlib nbagg import matplotlib import matplotlib.animation as animation import matplotlib.pyplot as plt plt.rcParams['axes.labelsize'] = 14 plt.rcParams['xtick.labelsize'] = 12 plt.rcParams['ytick.labelsize'] = 12 # Where to save the figures PROJECT_ROOT_DIR = "." CHAPTER_ID = "rl" def save_fig(fig_id, tight_layout=True): path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png") print("Saving figure", fig_id) if tight_layout: plt.tight_layout() plt.savefig(path, format='png', dpi=300) ``` # Introduction to OpenAI gym In this notebook we will be using [OpenAI gym](https://gym.openai.com/), a great toolkit for developing and comparing Reinforcement Learning algorithms. It provides many environments for your learning *agents* to interact with. Let's start by importing `gym`: ``` import gym ``` Next we will load the MsPacman environment, version 0. ``` env = gym.make('MsPacman-v0') ``` Let's initialize the environment by calling is `reset()` method. This returns an observation: ``` obs = env.reset() ``` Observations vary depending on the environment. In this case it is an RGB image represented as a 3D NumPy array of shape [width, height, channels] (with 3 channels: Red, Green and Blue). In other environments it may return different objects, as we will see later. ``` obs.shape ``` An environment can be visualized by calling its `render()` method, and you can pick the rendering mode (the rendering options depend on the environment). In this example we will set `mode="rgb_array"` to get an image of the environment as a NumPy array: ``` img = env.render(mode="rgb_array") ``` Let's plot this image: ``` plt.figure(figsize=(5,4)) plt.imshow(img) plt.axis("off") save_fig("MsPacman") plt.show() ``` Welcome back to the 1980s! :) In this environment, the rendered image is simply equal to the observation (but in many environments this is not the case): ``` (img == obs).all() ``` Let's create a little helper function to plot an environment: ``` def plot_environment(env, figsize=(5,4)): plt.close() # or else nbagg sometimes plots in the previous cell plt.figure(figsize=figsize) img = env.render(mode="rgb_array") plt.imshow(img) plt.axis("off") plt.show() ``` Let's see how to interact with an environment. Your agent will need to select an action from an "action space" (the set of possible actions). Let's see what this environment's action space looks like: ``` env.action_space ``` `Discrete(9)` means that the possible actions are integers 0 through 8, which represents the 9 possible positions of the joystick (0=center, 1=up, 2=right, 3=left, 4=down, 5=upper-right, 6=upper-left, 7=lower-right, 8=lower-left). Next we need to tell the environment which action to play, and it will compute the next step of the game. 
Let's go left for 110 steps, then lower left for 40 steps: ``` env.reset() for step in range(110): env.step(3) #left for step in range(40): env.step(8) #lower-left ``` Where are we now? ``` plot_environment(env) ``` The `step()` function actually returns several important objects: ``` obs, reward, done, info = env.step(0) ``` The observation tells the agent what the environment looks like, as discussed earlier. This is a 210x160 RGB image: ``` obs.shape ``` The environment also tells the agent how much reward it got during the last step: ``` reward ``` When the game is over, the environment returns `done=True`: ``` done ``` Finally, `info` is an environment-specific dictionary that can provide some extra information about the internal state of the environment. This is useful for debugging, but your agent should not use this information for learning (it would be cheating). ``` info ``` Let's play one full game (with 3 lives), by moving in random directions for 10 steps at a time, recording each frame: ``` frames = [] n_max_steps = 1000 n_change_steps = 10 obs = env.reset() for step in range(n_max_steps): img = env.render(mode="rgb_array") frames.append(img) if step % n_change_steps == 0: action = env.action_space.sample() # play randomly obs, reward, done, info = env.step(action) if done: break ``` Now show the animation (it's a bit jittery within Jupyter): ``` def update_scene(num, frames, patch): patch.set_data(frames[num]) return patch, def plot_animation(frames, repeat=False, interval=40): plt.close() # or else nbagg sometimes plots in the previous cell fig = plt.figure() patch = plt.imshow(frames[0]) plt.axis('off') return animation.FuncAnimation(fig, update_scene, fargs=(frames, patch), frames=len(frames), repeat=repeat, interval=interval) video = plot_animation(frames) plt.show() ``` Once you have finished playing with an environment, you should close it to free up resources: ``` env.close() ``` To code our first learning agent, we will be using a simpler environment: the Cart-Pole. # A simple environment: the Cart-Pole The Cart-Pole is a very simple environment composed of a cart that can move left or right, and pole placed vertically on top of it. The agent must move the cart left or right to keep the pole upright. ``` env = gym.make("CartPole-v0") obs = env.reset() obs ``` The observation is a 1D NumPy array composed of 4 floats: they represent the cart's horizontal position, its velocity, the angle of the pole (0 = vertical), and the angular velocity. Let's render the environment... unfortunately we need to fix an annoying rendering issue first. ## Fixing the rendering issue Some environments (including the Cart-Pole) require access to your display, which opens up a separate window, even if you specify the `rgb_array` mode. In general you can safely ignore that window. However, if Jupyter is running on a headless server (ie. without a screen) it will raise an exception. One way to avoid this is to install a fake X server like Xvfb. 
You can start Jupyter using the `xvfb-run` command: $ xvfb-run -s "-screen 0 1400x900x24" jupyter notebook If Jupyter is running on a headless server but you don't want to worry about Xvfb, then you can just use the following rendering function for the Cart-Pole: ``` from PIL import Image, ImageDraw try: from pyglet.gl import gl_info openai_cart_pole_rendering = True # no problem, let's use OpenAI gym's rendering function except Exception: openai_cart_pole_rendering = False # probably no X server available, let's use our own rendering function def render_cart_pole(env, obs): if openai_cart_pole_rendering: # use OpenAI gym's rendering function return env.render(mode="rgb_array") else: # rendering for the cart pole environment (in case OpenAI gym can't do it) img_w = 600 img_h = 400 cart_w = img_w // 12 cart_h = img_h // 15 pole_len = img_h // 3.5 pole_w = img_w // 80 + 1 x_width = 2 max_ang = 0.2 bg_col = (255, 255, 255) cart_col = 0x000000 # Blue Green Red pole_col = 0x669acc # Blue Green Red pos, vel, ang, ang_vel = obs img = Image.new('RGB', (img_w, img_h), bg_col) draw = ImageDraw.Draw(img) cart_x = pos * img_w // x_width + img_w // x_width cart_y = img_h * 95 // 100 top_pole_x = cart_x + pole_len * np.sin(ang) top_pole_y = cart_y - cart_h // 2 - pole_len * np.cos(ang) draw.line((0, cart_y, img_w, cart_y), fill=0) draw.rectangle((cart_x - cart_w // 2, cart_y - cart_h // 2, cart_x + cart_w // 2, cart_y + cart_h // 2), fill=cart_col) # draw cart draw.line((cart_x, cart_y - cart_h // 2, top_pole_x, top_pole_y), fill=pole_col, width=pole_w) # draw pole return np.array(img) def plot_cart_pole(env, obs): plt.close() # or else nbagg sometimes plots in the previous cell img = render_cart_pole(env, obs) plt.imshow(img) plt.axis("off") plt.show() plot_cart_pole(env, obs) ``` Now let's look at the action space: ``` env.action_space ``` Yep, just two possible actions: accelerate towards the left or towards the right. Let's push the cart left until the pole falls: ``` obs = env.reset() while True: obs, reward, done, info = env.step(0) if done: break plt.close() # or else nbagg sometimes plots in the previous cell img = render_cart_pole(env, obs) plt.imshow(img) plt.axis("off") save_fig("cart_pole_plot") ``` Notice that the game is over when the pole tilts too much, not when it actually falls. Now let's reset the environment and push the cart to right instead: ``` obs = env.reset() while True: obs, reward, done, info = env.step(1) if done: break plot_cart_pole(env, obs) ``` Looks like it's doing what we're telling it to do. Now how can we make the poll remain upright? We will need to define a _policy_ for that. This is the strategy that the agent will use to select an action at each step. It can use all the past actions and observations to decide what to do. # A simple hard-coded policy Let's hard code a simple strategy: if the pole is tilting to the left, then push the cart to the left, and _vice versa_. Let's see if that works: ``` frames = [] n_max_steps = 1000 n_change_steps = 10 obs = env.reset() for step in range(n_max_steps): img = render_cart_pole(env, obs) frames.append(img) # hard-coded policy position, velocity, angle, angular_velocity = obs if angle < 0: action = 0 else: action = 1 obs, reward, done, info = env.step(action) if done: break video = plot_animation(frames) plt.show() ``` Nope, the system is unstable and after just a few wobbles, the pole ends up too tilted: game over. We will need to be smarter than that! 
# Neural Network Policies Let's create a neural network that will take observations as inputs, and output the action to take for each observation. To choose an action, the network will first estimate a probability for each action, then select an action randomly according to the estimated probabilities. In the case of the Cart-Pole environment, there are just two possible actions (left or right), so we only need one output neuron: it will output the probability `p` of the action 0 (left), and of course the probability of action 1 (right) will be `1 - p`. ``` import tensorflow as tf from tensorflow.contrib.layers import fully_connected # 1. Specify the network architecture n_inputs = 4 # == env.observation_space.shape[0] n_hidden = 4 # it's a simple task, we don't need more than this n_outputs = 1 # only outputs the probability of accelerating left initializer = tf.contrib.layers.variance_scaling_initializer() # 2. Build the neural network X = tf.placeholder(tf.float32, shape=[None, n_inputs]) hidden = fully_connected(X, n_hidden, activation_fn=tf.nn.elu, weights_initializer=initializer) outputs = fully_connected(hidden, n_outputs, activation_fn=tf.nn.sigmoid, weights_initializer=initializer) # 3. Select a random action based on the estimated probabilities p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs]) action = tf.multinomial(tf.log(p_left_and_right), num_samples=1) init = tf.global_variables_initializer() ``` In this particular environment, the past actions and observations can safely be ignored, since each observation contains the environment's full state. If there were some hidden state then you may need to consider past actions and observations in order to try to infer the hidden state of the environment. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider not only the current observation but also the previous observation in order to estimate the current velocity. Another example is if the observations are noisy: you may want to use the past few observations to estimate the most likely current state. Our problem is thus as simple as can be: the current observation is noise-free and contains the environment's full state. You may wonder why we are picking a random action based on the probability given by the policy network, rather than just picking the action with the highest probability. This approach lets the agent find the right balance between _exploring_ new actions and _exploiting_ the actions that are known to work well. Here's an analogy: suppose you go to a restaurant for the first time, and all the dishes look equally appealing so you randomly pick one. If it turns out to be good, you can increase the probability to order it next time, but you shouldn't increase that probability to 100%, or else you will never try out the other dishes, some of which may be even better than the one you tried. Let's randomly initialize this policy neural network and use it to play one game: ``` n_max_steps = 1000 frames = [] with tf.Session() as sess: init.run() obs = env.reset() for step in range(n_max_steps): img = render_cart_pole(env, obs) frames.append(img) action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)}) obs, reward, done, info = env.step(action_val[0][0]) if done: break env.close() ``` Now let's look at how well this randomly initialized policy network performed: ``` video = plot_animation(frames) plt.show() ``` Yeah... pretty bad. The neural network will have to learn to do better. 
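To make the explore/exploit trade-off described above concrete, here is a tiny NumPy sketch (with a made-up probability) contrasting greedy selection with sampling from the estimated probabilities:

```
import numpy as np

p_left = 0.8                                    # suppose the network outputs p=0.8 for action 0 (left)
probas = [p_left, 1 - p_left]

greedy_action = np.argmax(probas)               # always 0: pure exploitation
sampled_action = np.random.choice(2, p=probas)  # 0 about 80% of the time, so action 1 still gets explored
print(greedy_action, sampled_action)
```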
First let's see if it is capable of learning the basic policy we used earlier: go left if the pole is tilting left, and go right if it is tilting right. The following code defines the same neural network but we add the target probabilities `y`, and the training operations (`cross_entropy`, `optimizer` and `training_op`): ``` import tensorflow as tf from tensorflow.contrib.layers import fully_connected tf.reset_default_graph() n_inputs = 4 n_hidden = 4 n_outputs = 1 learning_rate = 0.01 initializer = tf.contrib.layers.variance_scaling_initializer() X = tf.placeholder(tf.float32, shape=[None, n_inputs]) y = tf.placeholder(tf.float32, shape=[None, n_outputs]) hidden = fully_connected(X, n_hidden, activation_fn=tf.nn.elu, weights_initializer=initializer) logits = fully_connected(hidden, n_outputs, activation_fn=None) outputs = tf.nn.sigmoid(logits) # probability of action 0 (left) p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs]) action = tf.multinomial(tf.log(p_left_and_right), num_samples=1) cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits) optimizer = tf.train.AdamOptimizer(learning_rate) training_op = optimizer.minimize(cross_entropy) init = tf.global_variables_initializer() saver = tf.train.Saver() ``` We can make the same net play in 10 different environments in parallel, and train for 1000 iterations. We also reset environments when they are done. ``` n_environments = 10 n_iterations = 1000 envs = [gym.make("CartPole-v0") for _ in range(n_environments)] observations = [env.reset() for env in envs] with tf.Session() as sess: init.run() for iteration in range(n_iterations): target_probas = np.array([([1.] if obs[2] < 0 else [0.]) for obs in observations]) # if angle<0 we want proba(left)=1., or else proba(left)=0. action_val, _ = sess.run([action, training_op], feed_dict={X: np.array(observations), y: target_probas}) for env_index, env in enumerate(envs): obs, reward, done, info = env.step(action_val[env_index][0]) observations[env_index] = obs if not done else env.reset() saver.save(sess, "./my_policy_net_basic.ckpt") for env in envs: env.close() def render_policy_net(model_path, action, X, n_max_steps = 1000): frames = [] env = gym.make("CartPole-v0") obs = env.reset() with tf.Session() as sess: saver.restore(sess, model_path) for step in range(n_max_steps): img = render_cart_pole(env, obs) frames.append(img) action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)}) obs, reward, done, info = env.step(action_val[0][0]) if done: break env.close() return frames frames = render_policy_net("./my_policy_net_basic.ckpt", action, X) video = plot_animation(frames) plt.show() ``` Looks like it learned the policy correctly. Now let's see if it can learn a better policy on its own. # Policy Gradients To train this neural network we will need to define the target probabilities `y`. If an action is good we should increase its probability, and conversely if it is bad we should reduce it. But how do we know whether an action is good or bad? The problem is that most actions have delayed effects, so when you win or lose points in a game, it is not clear which actions contributed to this result: was it just the last action? Or the last 10? Or just one action 50 steps earlier? This is called the _credit assignment problem_. The _Policy Gradients_ algorithm tackles this problem by first playing multiple games, then making the actions in good games slightly more likely, while actions in bad games are made slightly less likely. 
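Concretely, each action is credited with the discounted sum of the rewards that came after it, and these returns are then normalized across all the games in the batch (this is what the `discount_rewards` and `discount_and_normalize_rewards` functions defined below compute):

$$R_t = r_t + \gamma\,r_{t+1} + \gamma^2\,r_{t+2} + \cdots = \sum_{k \ge 0} \gamma^k\,r_{t+k}, \qquad \tilde{R}_t = \frac{R_t - \mu_R}{\sigma_R}$$

where $\gamma$ is the discount rate and $\mu_R$, $\sigma_R$ are the mean and standard deviation of all the discounted rewards in the batch.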
First we play, then we go back and think about what we did. ``` import tensorflow as tf from tensorflow.contrib.layers import fully_connected tf.reset_default_graph() n_inputs = 4 n_hidden = 4 n_outputs = 1 learning_rate = 0.01 initializer = tf.contrib.layers.variance_scaling_initializer() X = tf.placeholder(tf.float32, shape=[None, n_inputs]) hidden = fully_connected(X, n_hidden, activation_fn=tf.nn.elu, weights_initializer=initializer) logits = fully_connected(hidden, n_outputs, activation_fn=None) outputs = tf.nn.sigmoid(logits) # probability of action 0 (left) p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs]) action = tf.multinomial(tf.log(p_left_and_right), num_samples=1) y = 1. - tf.to_float(action) cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits) optimizer = tf.train.AdamOptimizer(learning_rate) grads_and_vars = optimizer.compute_gradients(cross_entropy) gradients = [grad for grad, variable in grads_and_vars] gradient_placeholders = [] grads_and_vars_feed = [] for grad, variable in grads_and_vars: gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape()) gradient_placeholders.append(gradient_placeholder) grads_and_vars_feed.append((gradient_placeholder, variable)) training_op = optimizer.apply_gradients(grads_and_vars_feed) init = tf.global_variables_initializer() saver = tf.train.Saver() def discount_rewards(rewards, discount_rate): discounted_rewards = np.zeros(len(rewards)) cumulative_rewards = 0 for step in reversed(range(len(rewards))): cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate discounted_rewards[step] = cumulative_rewards return discounted_rewards def discount_and_normalize_rewards(all_rewards, discount_rate): all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards] flat_rewards = np.concatenate(all_discounted_rewards) reward_mean = flat_rewards.mean() reward_std = flat_rewards.std() return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards] discount_rewards([10, 0, -50], discount_rate=0.8) discount_and_normalize_rewards([[10, 0, -50], [10, 20]], discount_rate=0.8) env = gym.make("CartPole-v0") n_games_per_update = 10 n_max_steps = 1000 n_iterations = 250 save_iterations = 10 discount_rate = 0.95 with tf.Session() as sess: init.run() for iteration in range(n_iterations): print("\rIteration: {}".format(iteration), end="") all_rewards = [] all_gradients = [] for game in range(n_games_per_update): current_rewards = [] current_gradients = [] obs = env.reset() for step in range(n_max_steps): action_val, gradients_val = sess.run([action, gradients], feed_dict={X: obs.reshape(1, n_inputs)}) obs, reward, done, info = env.step(action_val[0][0]) current_rewards.append(reward) current_gradients.append(gradients_val) if done: break all_rewards.append(current_rewards) all_gradients.append(current_gradients) all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate) feed_dict = {} for var_index, gradient_placeholder in enumerate(gradient_placeholders): mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index] for game_index, rewards in enumerate(all_rewards) for step, reward in enumerate(rewards)], axis=0) feed_dict[gradient_placeholder] = mean_gradients sess.run(training_op, feed_dict=feed_dict) if iteration % save_iterations == 0: saver.save(sess, "./my_policy_net_pg.ckpt") env.close() frames = render_policy_net("./my_policy_net_pg.ckpt", action, X, 
n_max_steps=1000) video = plot_animation(frames) plt.show() ``` # Markov Chains ``` transition_probabilities = [ [0.7, 0.2, 0.0, 0.1], # from s0 to s0, s1, s2, s3 [0.0, 0.0, 0.9, 0.1], # from s1 to ... [0.0, 1.0, 0.0, 0.0], # from s2 to ... [0.0, 0.0, 0.0, 1.0], # from s3 to ... ] n_max_steps = 50 def print_sequence(start_state=0): current_state = start_state print("States:", end=" ") for step in range(n_max_steps): print(current_state, end=" ") if current_state == 3: break current_state = rnd.choice(range(4), p=transition_probabilities[current_state]) else: print("...", end="") print() for _ in range(10): print_sequence() ``` # Markov Decision Process ``` transition_probabilities = [ [[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]], # in s0, if action a0 then proba 0.7 to state s0 and 0.3 to state s1, etc. [[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]], [None, [0.8, 0.1, 0.1], None], ] rewards = [ [[+10, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, -50]], [[0, 0, 0], [+40, 0, 0], [0, 0, 0]], ] possible_actions = [[0, 1, 2], [0, 2], [1]] def policy_fire(state): return [0, 2, 1][state] def policy_random(state): return rnd.choice(possible_actions[state]) def policy_safe(state): return [0, 0, 1][state] class MDPEnvironment(object): def __init__(self, start_state=0): self.start_state=start_state self.reset() def reset(self): self.total_rewards = 0 self.state = self.start_state def step(self, action): next_state = rnd.choice(range(3), p=transition_probabilities[self.state][action]) reward = rewards[self.state][action][next_state] self.state = next_state self.total_rewards += reward return self.state, reward def run_episode(policy, n_steps, start_state=0, display=True): env = MDPEnvironment() if display: print("States (+rewards):", end=" ") for step in range(n_steps): if display: if step == 10: print("...", end=" ") elif step < 10: print(env.state, end=" ") action = policy(env.state) state, reward = env.step(action) if display and step < 10: if reward: print("({})".format(reward), end=" ") if display: print("Total rewards =", env.total_rewards) return env.total_rewards for policy in (policy_fire, policy_random, policy_safe): all_totals = [] print(policy.__name__) for episode in range(1000): all_totals.append(run_episode(policy, n_steps=100, display=(episode<5))) print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals))) print() ``` # Q-Learning Q-Learning will learn the optimal policy by watching the random policy play. 
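The update applied at every step in the next cell is the standard Q-Learning rule, with learning rate $\alpha$ and discount rate $\gamma$:

$$Q(s, a) \leftarrow (1 - \alpha)\,Q(s, a) + \alpha\,\bigl(r + \gamma\,\max_{a'} Q(s', a')\bigr)$$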
``` n_states = 3 n_actions = 3 n_steps = 20000 alpha = 0.01 gamma = 0.99 exploration_policy = policy_random q_values = np.full((n_states, n_actions), -np.inf) for state, actions in enumerate(possible_actions): q_values[state][actions]=0 env = MDPEnvironment() for step in range(n_steps): action = exploration_policy(env.state) state = env.state next_state, reward = env.step(action) next_value = np.max(q_values[next_state]) # greedy policy q_values[state, action] = (1-alpha)*q_values[state, action] + alpha*(reward + gamma * next_value) def optimal_policy(state): return np.argmax(q_values[state]) q_values all_totals = [] for episode in range(1000): all_totals.append(run_episode(optimal_policy, n_steps=100, display=(episode<5))) print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals))) print() ``` # Learning to play MsPacman using Deep Q-Learning ``` env = gym.make("MsPacman-v0") obs = env.reset() obs.shape env.action_space ``` ## Preprocessing Preprocessing the images is optional but greatly speeds up training. ``` mspacman_color = np.array([210, 164, 74]).mean() def preprocess_observation(obs): img = obs[1:176:2, ::2] # crop and downsize img = img.mean(axis=2) # to greyscale img[img==mspacman_color] = 0 # Improve contrast img = (img - 128) / 128 - 1 # normalize from -1. to 1. return img.reshape(88, 80, 1) img = preprocess_observation(obs) plt.figure(figsize=(11, 7)) plt.subplot(121) plt.title("Original observation (160×210 RGB)") plt.imshow(obs) plt.axis("off") plt.subplot(122) plt.title("Preprocessed observation (88×80 greyscale)") plt.imshow(img.reshape(88, 80), interpolation="nearest", cmap="gray") plt.axis("off") save_fig("preprocessing_plot") plt.show() ``` ## Build DQN ``` tf.reset_default_graph() from tensorflow.contrib.layers import convolution2d, fully_connected input_height = 88 input_width = 80 input_channels = 1 conv_n_maps = [32, 64, 64] conv_kernel_sizes = [(8,8), (4,4), (3,3)] conv_strides = [4, 2, 1] conv_paddings = ["SAME"]*3 conv_activation = [tf.nn.relu]*3 n_hidden_inputs = 64 * 11 * 10 # conv3 has 64 maps of 11x10 each n_hidden = 512 hidden_activation = tf.nn.relu n_outputs = env.action_space.n initializer = tf.contrib.layers.variance_scaling_initializer() learning_rate = 0.01 def q_network(X_state, scope): prev_layer = X_state conv_layers = [] with tf.variable_scope(scope) as scope: for n_maps, kernel_size, stride, padding, activation in zip(conv_n_maps, conv_kernel_sizes, conv_strides, conv_paddings, conv_activation): prev_layer = convolution2d(prev_layer, num_outputs=n_maps, kernel_size=kernel_size, stride=stride, padding=padding, activation_fn=activation, weights_initializer=initializer) conv_layers.append(prev_layer) last_conv_layer_flat = tf.reshape(prev_layer, shape=[-1, n_hidden_inputs]) hidden = fully_connected(last_conv_layer_flat, n_hidden, activation_fn=hidden_activation, weights_initializer=initializer) outputs = fully_connected(hidden, n_outputs, activation_fn=None) trainable_vars = {var.name[len(scope.name):]: var for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope.name)} return outputs, trainable_vars X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width, input_channels]) actor_q_values, actor_vars = q_network(X_state, scope="q_networks/actor") # acts critic_q_values, critic_vars = q_network(X_state, scope="q_networks/critic") # learns copy_ops = [actor_var.assign(critic_vars[var_name]) for var_name, actor_var in 
actor_vars.items()] copy_critic_to_actor = tf.group(*copy_ops) with tf.variable_scope("train"): X_action = tf.placeholder(tf.int32, shape=[None]) y = tf.placeholder(tf.float32, shape=[None, 1]) q_value = tf.reduce_sum(critic_q_values * tf.one_hot(X_action, n_outputs), axis=1, keep_dims=True) cost = tf.reduce_mean(tf.square(y - q_value)) global_step = tf.Variable(0, trainable=False, name='global_step') optimizer = tf.train.AdamOptimizer(learning_rate) training_op = optimizer.minimize(cost, global_step=global_step) init = tf.global_variables_initializer() saver = tf.train.Saver() actor_vars from collections import deque replay_memory_size = 10000 replay_memory = deque([], maxlen=replay_memory_size) def sample_memories(batch_size): indices = rnd.permutation(len(replay_memory))[:batch_size] cols = [[], [], [], [], []] # state, action, reward, next_state, continue for idx in indices: memory = replay_memory[idx] for col, value in zip(cols, memory): col.append(value) cols = [np.array(col) for col in cols] return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1) eps_min = 0.05 eps_max = 1.0 eps_decay_steps = 50000 import sys def epsilon_greedy(q_values, step): epsilon = max(eps_min, eps_max - (eps_max-eps_min) * step/eps_decay_steps) if rnd.rand() < epsilon: return rnd.randint(n_outputs) # random action else: return np.argmax(q_values) # optimal action n_steps = 100000 # total number of training steps training_start = 1000 # start training after 1,000 game iterations training_interval = 3 # run a training step every 3 game iterations save_steps = 50 # save the model every 50 training steps copy_steps = 25 # copy the critic to the actor every 25 training steps discount_rate = 0.95 skip_start = 90 # Skip the start of every game (it's just waiting time). batch_size = 50 iteration = 0 # game iterations checkpoint_path = "./my_dqn.ckpt" done = True # env needs to be reset with tf.Session() as sess: if os.path.isfile(checkpoint_path): saver.restore(sess, checkpoint_path) else: init.run() while True: step = global_step.eval() if step >= n_steps: break iteration += 1 print("\rIteration {}\tTraining step {}/{} ({:.1f}%)".format(iteration, step, n_steps, step * 100 / n_steps), end="") if done: # game over, start again obs = env.reset() for skip in range(skip_start): # skip boring game iterations at the start of each game obs, reward, done, info = env.step(0) state = preprocess_observation(obs) # Actor evaluates what to do q_values = actor_q_values.eval(feed_dict={X_state: [state]}) action = epsilon_greedy(q_values, step) # Actor plays obs, reward, done, info = env.step(action) next_state = preprocess_observation(obs) # Let's memorize what happened replay_memory.append((state, action, reward, next_state, 1.0 - done)) state = next_state if iteration < training_start or iteration % training_interval != 0: continue # Critic learns X_state_val, X_action_val, rewards, X_next_state_val, continues = sample_memories(batch_size) next_q_values = actor_q_values.eval(feed_dict={X_state: X_next_state_val}) y_val = rewards + continues * discount_rate * np.max(next_q_values, axis=1, keepdims=True) training_op.run(feed_dict={X_state: X_state_val, X_action: X_action_val, y: y_val}) # Regularly copy critic to actor if step % copy_steps == 0: copy_critic_to_actor.run() # And save regularly if step % save_steps == 0: saver.save(sess, checkpoint_path) ``` # Exercise solutions Coming soon...
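As an illustrative addition (my own sketch, not one of the pending exercise solutions), here is one way the trained DQN actor could be run greedily for a single game, reusing the graph, checkpoint, environment and preprocessing defined above:

```
# Hedged sketch: play one game greedily with the trained DQN.
# Assumes the graph, saver, checkpoint_path, env and preprocess_observation
# defined in the cells above are still in scope.
with tf.Session() as sess:
    saver.restore(sess, checkpoint_path)
    obs = env.reset()
    for skip in range(skip_start):              # skip the waiting time at the start
        obs, reward, done, info = env.step(0)
    total_reward = 0.0
    done = False
    while not done:
        state = preprocess_observation(obs)
        q_values = actor_q_values.eval(feed_dict={X_state: [state]})
        action = np.argmax(q_values)            # greedy action, no exploration
        obs, reward, done, info = env.step(action)
        total_reward += reward
    print("Total reward for one greedy game:", total_reward)
env.close()
```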
github_jupyter
[![AnalyticsDojo](https://github.com/rpi-techfundamentals/spring2019-materials/blob/master/fig/final-logo.png?raw=1)](http://rpi.analyticsdojo.com) <center><h1>Basic Text Feature Creation in Python</h1></center> <center><h3><a href = 'http://rpi.analyticsdojo.com'>rpi.analyticsdojo.com</a></h3></center> # Basic Text Feature Creation in Python ``` !wget https://raw.githubusercontent.com/rpi-techfundamentals/spring2019-materials/master/input/train.csv !wget https://raw.githubusercontent.com/rpi-techfundamentals/spring2019-materials/master/input/test.csv import numpy as np import pandas as pd import pandas as pd train= pd.read_csv('train.csv') test = pd.read_csv('test.csv') #Print to standard output, and see the results in the "log" section below after running your script train.head() #Print to standard output, and see the results in the "log" section below after running your script train.describe() train.dtypes #Let's look at the age field. We can see "NaN" (which indicates missing values).s train["Age"] #Now let's recode. medianAge=train["Age"].median() print ("The Median age is:", medianAge, " years old.") train["Age"] = train["Age"].fillna(medianAge) #Option 2 all in one shot! train["Age"] = train["Age"].fillna(train["Age"].median()) train["Age"] #For Recoding Data, we can use what we know of selecting rows and columns train["Embarked"] = train["Embarked"].fillna("S") train.loc[train["Embarked"] == "S", "EmbarkedRecode"] = 0 train.loc[train["Embarked"] == "C", "EmbarkedRecode"] = 1 train.loc[train["Embarked"] == "Q", "EmbarkedRecode"] = 2 # We can also use something called a lambda function # You can read more about the lambda function here. #http://www.python-course.eu/lambda.php gender_fn = lambda x: 0 if x == 'male' else 1 train['Gender'] = train['Sex'].map(gender_fn) #or we can do in one shot train['NameLength'] = train['Name'].map(lambda x: len(x)) train['Age2'] = train['Age'].map(lambda x: x*x) train #We can start to create little small functions that will find a string. def has_title(name): for s in ['Mr.', 'Mrs.', 'Miss.', 'Dr.', 'Sir.']: if name.find(s) >= 0: return True return False #Now we are using that separate function in another function. title_fn = lambda x: 1 if has_title(x) else 0 #Finally, we call the function for name train['Title'] = train['Name'].map(title_fn) test['Title']= train['Name'].map(title_fn) test #Writing to File submission=pd.DataFrame(test.loc[:,['PassengerId','Survived']]) #Any files you save will be available in the output tab below submission.to_csv('submission.csv', index=False) ```
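In the same spirit, a couple of extra text-derived features could be added with the lambda/map pattern used above. This is only a sketch; the column names `NameWordCount` and `HasParen` are my own, not part of the original notebook:

```
# Count the number of words in the passenger's name.
word_count_fn = lambda x: len(x.split())
train['NameWordCount'] = train['Name'].map(word_count_fn)
test['NameWordCount'] = test['Name'].map(word_count_fn)

# Flag names containing parentheses, which usually hold a maiden name.
paren_fn = lambda x: 1 if '(' in x else 0
train['HasParen'] = train['Name'].map(paren_fn)
test['HasParen'] = test['Name'].map(paren_fn)

train[['Name', 'NameWordCount', 'HasParen']].head()
```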
github_jupyter
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D5_DimensionalityReduction/W1D5_Tutorial3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Neuromatch Academy: Week 1, Day 5, Tutorial 3 # Dimensionality Reduction and reconstruction __Content creators:__ Alex Cayco Gajic, John Murray __Content reviewers:__ Roozbeh Farhoudi, Matt Krause, Spiros Chavlis, Richard Gao, Michael Waskom --- # Tutorial Objectives In this notebook we'll learn to apply PCA for dimensionality reduction, using a classic dataset that is often used to benchmark machine learning algorithms: MNIST. We'll also learn how to use PCA for reconstruction and denoising. Overview: - Perform PCA on MNIST - Calculate the variance explained - Reconstruct data with different numbers of PCs - (Bonus) Examine denoising using PCA You can learn more about MNIST dataset [here](https://en.wikipedia.org/wiki/MNIST_database). ``` # @title Video 1: PCA for dimensionality reduction from IPython.display import YouTubeVideo video = YouTubeVideo(id="oO0bbInoO_0", width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ``` --- # Setup Run these cells to get the tutorial started. ``` # Imports import numpy as np import matplotlib.pyplot as plt # @title Figure Settings import ipywidgets as widgets # interactive display %config InlineBackend.figure_format = 'retina' plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle") # @title Helper Functions def plot_variance_explained(variance_explained): """ Plots eigenvalues. Args: variance_explained (numpy array of floats) : Vector of variance explained for each PC Returns: Nothing. """ plt.figure() plt.plot(np.arange(1, len(variance_explained) + 1), variance_explained, '--k') plt.xlabel('Number of components') plt.ylabel('Variance explained') plt.show() def plot_MNIST_reconstruction(X, X_reconstructed): """ Plots 9 images in the MNIST dataset side-by-side with the reconstructed images. Args: X (numpy array of floats) : Data matrix each column corresponds to a different random variable X_reconstructed (numpy array of floats) : Data matrix each column corresponds to a different random variable Returns: Nothing. """ plt.figure() ax = plt.subplot(121) k = 0 for k1 in range(3): for k2 in range(3): k = k + 1 plt.imshow(np.reshape(X[k, :], (28, 28)), extent=[(k1 + 1) * 28, k1 * 28, (k2 + 1) * 28, k2 * 28], vmin=0, vmax=255) plt.xlim((3 * 28, 0)) plt.ylim((3 * 28, 0)) plt.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False) ax.set_xticks([]) ax.set_yticks([]) plt.title('Data') plt.clim([0, 250]) ax = plt.subplot(122) k = 0 for k1 in range(3): for k2 in range(3): k = k + 1 plt.imshow(np.reshape(np.real(X_reconstructed[k, :]), (28, 28)), extent=[(k1 + 1) * 28, k1 * 28, (k2 + 1) * 28, k2 * 28], vmin=0, vmax=255) plt.xlim((3 * 28, 0)) plt.ylim((3 * 28, 0)) plt.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False) ax.set_xticks([]) ax.set_yticks([]) plt.clim([0, 250]) plt.title('Reconstructed') plt.tight_layout() def plot_MNIST_sample(X): """ Plots 9 images in the MNIST dataset. Args: X (numpy array of floats) : Data matrix each column corresponds to a different random variable Returns: Nothing. 
""" fig, ax = plt.subplots() k = 0 for k1 in range(3): for k2 in range(3): k = k + 1 plt.imshow(np.reshape(X[k, :], (28, 28)), extent=[(k1 + 1) * 28, k1 * 28, (k2+1) * 28, k2 * 28], vmin=0, vmax=255) plt.xlim((3 * 28, 0)) plt.ylim((3 * 28, 0)) plt.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False) plt.clim([0, 250]) ax.set_xticks([]) ax.set_yticks([]) plt.show() def plot_MNIST_weights(weights): """ Visualize PCA basis vector weights for MNIST. Red = positive weights, blue = negative weights, white = zero weight. Args: weights (numpy array of floats) : PCA basis vector Returns: Nothing. """ fig, ax = plt.subplots() cmap = plt.cm.get_cmap('seismic') plt.imshow(np.real(np.reshape(weights, (28, 28))), cmap=cmap) plt.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False) plt.clim(-.15, .15) plt.colorbar(ticks=[-.15, -.1, -.05, 0, .05, .1, .15]) ax.set_xticks([]) ax.set_yticks([]) plt.show() def add_noise(X, frac_noisy_pixels): """ Randomly corrupts a fraction of the pixels by setting them to random values. Args: X (numpy array of floats) : Data matrix frac_noisy_pixels (scalar) : Fraction of noisy pixels Returns: (numpy array of floats) : Data matrix + noise """ X_noisy = np.reshape(X, (X.shape[0] * X.shape[1])) N_noise_ixs = int(X_noisy.shape[0] * frac_noisy_pixels) noise_ixs = np.random.choice(X_noisy.shape[0], size=N_noise_ixs, replace=False) X_noisy[noise_ixs] = np.random.uniform(0, 255, noise_ixs.shape) X_noisy = np.reshape(X_noisy, (X.shape[0], X.shape[1])) return X_noisy def change_of_basis(X, W): """ Projects data onto a new basis. Args: X (numpy array of floats) : Data matrix each column corresponding to a different random variable W (numpy array of floats) : new orthonormal basis columns correspond to basis vectors Returns: (numpy array of floats) : Data matrix expressed in new basis """ Y = np.matmul(X, W) return Y def get_sample_cov_matrix(X): """ Returns the sample covariance matrix of data X. Args: X (numpy array of floats) : Data matrix each column corresponds to a different random variable Returns: (numpy array of floats) : Covariance matrix """ X = X - np.mean(X, 0) cov_matrix = 1 / X.shape[0] * np.matmul(X.T, X) return cov_matrix def sort_evals_descending(evals, evectors): """ Sorts eigenvalues and eigenvectors in decreasing order. Also aligns first two eigenvectors to be in first two quadrants (if 2D). Args: evals (numpy array of floats) : Vector of eigenvalues evectors (numpy array of floats) : Corresponding matrix of eigenvectors each column corresponds to a different eigenvalue Returns: (numpy array of floats) : Vector of eigenvalues after sorting (numpy array of floats) : Matrix of eigenvectors after sorting """ index = np.flip(np.argsort(evals)) evals = evals[index] evectors = evectors[:, index] if evals.shape[0] == 2: if np.arccos(np.matmul(evectors[:, 0], 1 / np.sqrt(2) * np.array([1, 1]))) > np.pi / 2: evectors[:, 0] = -evectors[:, 0] if np.arccos(np.matmul(evectors[:, 1], 1 / np.sqrt(2)*np.array([-1, 1]))) > np.pi / 2: evectors[:, 1] = -evectors[:, 1] return evals, evectors def pca(X): """ Performs PCA on multivariate data. 
Eigenvalues are sorted in decreasing order Args: X (numpy array of floats) : Data matrix each column corresponds to a different random variable Returns: (numpy array of floats) : Data projected onto the new basis (numpy array of floats) : Vector of eigenvalues (numpy array of floats) : Corresponding matrix of eigenvectors """ X = X - np.mean(X, 0) cov_matrix = get_sample_cov_matrix(X) evals, evectors = np.linalg.eigh(cov_matrix) evals, evectors = sort_evals_descending(evals, evectors) score = change_of_basis(X, evectors) return score, evectors, evals def plot_eigenvalues(evals, limit=True): """ Plots eigenvalues. Args: (numpy array of floats) : Vector of eigenvalues Returns: Nothing. """ plt.figure() plt.plot(np.arange(1, len(evals) + 1), evals, 'o-k') plt.xlabel('Component') plt.ylabel('Eigenvalue') plt.title('Scree plot') if limit: plt.show() ``` --- # Section 1: Perform PCA on MNIST The MNIST dataset consists of a 70,000 images of individual handwritten digits. Each image is a 28x28 pixel grayscale image. For convenience, each 28x28 pixel image is often unravelled into a single 784 (=28*28) element vector, so that the whole dataset is represented as a 70,000 x 784 matrix. Each row represents a different image, and each column represents a different pixel. Enter the following cell to load the MNIST dataset and plot the first nine images. ``` from sklearn.datasets import fetch_openml mnist = fetch_openml(name='mnist_784') X = mnist.data plot_MNIST_sample(X) ``` The MNIST dataset has an extrinsic dimensionality of 784, much higher than the 2-dimensional examples used in the previous tutorials! To make sense of this data, we'll use dimensionality reduction. But first, we need to determine the intrinsic dimensionality $K$ of the data. One way to do this is to look for an "elbow" in the scree plot, to determine which eigenvalues are signficant. ## Exercise 1: Scree plot of MNIST In this exercise you will examine the scree plot in the MNIST dataset. **Steps:** - Perform PCA on the dataset and examine the scree plot. - When do the eigenvalues appear (by eye) to reach zero? (**Hint:** use `plt.xlim` to zoom into a section of the plot). ``` help(pca) help(plot_eigenvalues) ################################################# ## TO DO for students: perform PCA and plot the eigenvalues ################################################# # perform PCA # score, evectors, evals = ... # plot the eigenvalues # plot_eigenvalues(evals, limit=False) # plt.xlim(...) # limit x-axis up to 100 for zooming # to_remove solution # perform PCA score, evectors, evals = pca(X) # plot the eigenvalues with plt.xkcd(): plot_eigenvalues(evals, limit=False) plt.xlim([0, 100]) # limit x-axis up to 100 for zooming ``` --- # Section 2: Calculate the variance explained The scree plot suggests that most of the eigenvalues are near zero, with fewer than 100 having large values. Another common way to determine the intrinsic dimensionality is by considering the variance explained. This can be examined with a cumulative plot of the fraction of the total variance explained by the top $K$ components, i.e., \begin{equation} \text{var explained} = \frac{\sum_{i=1}^K \lambda_i}{\sum_{i=1}^N \lambda_i} \end{equation} The intrinsic dimensionality is often quantified by the $K$ necessary to explain a large proportion of the total variance of the data (often a defined threshold, e.g., 90%). ## Exercise 2: Plot the explained variance In this exercise you will plot the explained variance. 
**Steps:** - Fill in the function below to calculate the fraction variance explained as a function of the number of principal componenets. **Hint:** use `np.cumsum`. - Plot the variance explained using `plot_variance_explained`. **Questions:** - How many principal components are required to explain 90% of the variance? - How does the intrinsic dimensionality of this dataset compare to its extrinsic dimensionality? ``` help(plot_variance_explained) def get_variance_explained(evals): """ Calculates variance explained from the eigenvalues. Args: evals (numpy array of floats) : Vector of eigenvalues Returns: (numpy array of floats) : Vector of variance explained """ ################################################# ## TO DO for students: calculate the explained variance using the equation ## from Section 2. # Comment once you've filled in the function raise NotImplementedError("Student excercise: calculate explaine variance!") ################################################# # cumulatively sum the eigenvalues csum = ... # normalize by the sum of eigenvalues variance_explained = ... return variance_explained ################################################# ## TO DO for students: call the function and plot the variance explained ################################################# # calculate the variance explained variance_explained = ... # Uncomment to plot the variance explained # plot_variance_explained(variance_explained) # to_remove solution def get_variance_explained(evals): """ Plots eigenvalues. Args: (numpy array of floats) : Vector of eigenvalues Returns: Nothing. """ # cumulatively sum the eigenvalues csum = np.cumsum(evals) # normalize by the sum of eigenvalues variance_explained = csum / np.sum(evals) return variance_explained # calculate the variance explained variance_explained = get_variance_explained(evals) with plt.xkcd(): plot_variance_explained(variance_explained) ``` --- # Section 3: Reconstruct data with different numbers of PCs Now we have seen that the top 100 or so principal components of the data can explain most of the variance. We can use this fact to perform *dimensionality reduction*, i.e., by storing the data using only 100 components rather than the samples of all 784 pixels. Remarkably, we will be able to reconstruct much of the structure of the data using only the top 100 components. To see this, recall that to perform PCA we projected the data $\bf X$ onto the eigenvectors of the covariance matrix: \begin{equation} \bf S = X W \end{equation} Since $\bf W$ is an orthogonal matrix, ${\bf W}^{-1} = {\bf W}^T$. So by multiplying by ${\bf W}^T$ on each side we can rewrite this equation as \begin{equation} {\bf X = S W}^T. \end{equation} This now gives us a way to reconstruct the data matrix from the scores and loadings. To reconstruct the data from a low-dimensional approximation, we just have to truncate these matrices. Let's call ${\bf S}_{1:K}$ and ${\bf W}_{1:K}$ as keeping only the first $K$ columns of this matrix. Then our reconstruction is: \begin{equation} {\bf \hat X = S}_{1:K} ({\bf W}_{1:K})^T. \end{equation} ## Exercise 3: Data reconstruction Fill in the function below to reconstruct the data using different numbers of principal components. **Steps:** * Fill in the following function to reconstruct the data based on the weights and scores. Don't forget to add the mean! * Make sure your function works by reconstructing the data with all $K=784$ components. The two images should look identical. 
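Before filling in the exercise, it can help to verify the identity ${\bf X = S W}^T$ on a small standalone example (this check is my own addition and is independent of the MNIST exercise below):

```
# Sanity check: projecting onto an orthonormal basis and back loses nothing.
rng = np.random.RandomState(0)
X_small = rng.randn(100, 5)
X_centered = X_small - X_small.mean(axis=0)

# Eigenvectors of the covariance matrix form an orthonormal basis W.
cov = np.cov(X_centered, rowvar=False)
_, W = np.linalg.eigh(cov)

S = X_centered @ W                        # scores: project onto the new basis
X_back = S @ W.T + X_small.mean(axis=0)   # reconstruct with all components

print(np.allclose(X_back, X_small))       # True: K = N keeps all information
```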
``` help(plot_MNIST_reconstruction) def reconstruct_data(score, evectors, X_mean, K): """ Reconstruct the data based on the top K components. Args: score (numpy array of floats) : Score matrix evectors (numpy array of floats) : Matrix of eigenvectors X_mean (numpy array of floats) : Vector corresponding to data mean K (scalar) : Number of components to include Returns: (numpy array of floats) : Matrix of reconstructed data """ ################################################# ## TO DO for students: Reconstruct the original data in X_reconstructed # Comment once you've filled in the function raise NotImplementedError("Student excercise: reconstructing data function!") ################################################# # Reconstruct the data from the score and eigenvectors # Don't forget to add the mean!! X_reconstructed = ... return X_reconstructed K = 784 ################################################# ## TO DO for students: Calculate the mean and call the function, then plot ## the original and the recostructed data ################################################# # Reconstruct the data based on all components X_mean = ... X_reconstructed = ... # Plot the data and reconstruction # plot_MNIST_reconstruction(X, X_reconstructed) # to_remove solution def reconstruct_data(score, evectors, X_mean, K): """ Reconstruct the data based on the top K components. Args: score (numpy array of floats) : Score matrix evectors (numpy array of floats) : Matrix of eigenvectors X_mean (numpy array of floats) : Vector corresponding to data mean K (scalar) : Number of components to include Returns: (numpy array of floats) : Matrix of reconstructed data """ # Reconstruct the data from the score and eigenvectors # Don't forget to add the mean!! X_reconstructed = np.matmul(score[:, :K], evectors[:, :K].T) + X_mean return X_reconstructed K = 784 # Reconstruct the data based on all components X_mean = np.mean(X, 0) X_reconstructed = reconstruct_data(score, evectors, X_mean, K) # Plot the data and reconstruction with plt.xkcd(): plot_MNIST_reconstruction(X, X_reconstructed) ``` ## Interactive Demo: Reconstruct the data matrix using different numbers of PCs Now run the code below and experiment with the slider to reconstruct the data matrix using different numbers of principal components. **Steps** * How many principal components are necessary to reconstruct the numbers (by eye)? How does this relate to the intrinsic dimensionality of the data? * Do you see any information in the data with only a single principal component? ``` # @title # @markdown Make sure you execute this cell to enable the widget! def refresh(K=100): X_reconstructed = reconstruct_data(score, evectors, X_mean, K) plot_MNIST_reconstruction(X, X_reconstructed) plt.title('Reconstructed, K={}'.format(K)) _ = widgets.interact(refresh, K=(1, 784, 10)) ``` ## Exercise 4: Visualization of the weights Next, let's take a closer look at the first principal component by visualizing its corresponding weights. **Steps:** * Enter `plot_MNIST_weights` to visualize the weights of the first basis vector. * What structure do you see? Which pixels have a strong positive weighting? Which have a strong negative weighting? What kinds of images would this basis vector differentiate? * Try visualizing the second and third basis vectors. Do you see any structure? What about the 100th basis vector? 500th? 700th? 
``` help(plot_MNIST_weights) ################################################# ## TO DO for students: plot the weights calling the plot_MNIST_weights function ################################################# # Plot the weights of the first principal component # plot_MNIST_weights(...) # to_remove solution # Plot the weights of the first principal component with plt.xkcd(): plot_MNIST_weights(evectors[:, 0]) ``` --- # Summary * In this tutorial, we learned how to use PCA for dimensionality reduction by selecting the top principal components. This can be useful as the intrinsic dimensionality ($K$) is often less than the extrinsic dimensionality ($N$) in neural data. $K$ can be inferred by choosing the number of eigenvalues necessary to capture some fraction of the variance. * We also learned how to reconstruct an approximation of the original data using the top $K$ principal components. In fact, an alternate formulation of PCA is to find the $K$ dimensional space that minimizes the reconstruction error. * Noise tends to inflate the apparent intrinsic dimensionality, however the higher components reflect noise rather than new structure in the data. PCA can be used for denoising data by removing noisy higher components. * In MNIST, the weights corresponding to the first principal component appear to discriminate between a 0 and 1. We will discuss the implications of this for data visualization in the following tutorial. --- # Bonus: Examine denoising using PCA In this lecture, we saw that PCA finds an optimal low-dimensional basis to minimize the reconstruction error. Because of this property, PCA can be useful for denoising corrupted samples of the data. ## Exercise 5: Add noise to the data In this exercise you will add salt-and-pepper noise to the original data and see how that affects the eigenvalues. **Steps:** - Use the function `add_noise` to add noise to 20% of the pixels. - Then, perform PCA and plot the variance explained. How many principal components are required to explain 90% of the variance? How does this compare to the original data? ``` help(add_noise) ################################################################### # Insert your code here to: # Add noise to the data # Plot noise-corrupted data # Perform PCA on the noisy data # Calculate and plot the variance explained ################################################################### np.random.seed(2020) # set random seed X_noisy = ... # score_noisy, evectors_noisy, evals_noisy = ... # variance_explained_noisy = ... # plot_MNIST_sample(X_noisy) # plot_variance_explained(variance_explained_noisy) # to_remove solution np.random.seed(2020) # set random seed X_noisy = add_noise(X, .2) score_noisy, evectors_noisy, evals_noisy = pca(X_noisy) variance_explained_noisy = get_variance_explained(evals_noisy) with plt.xkcd(): plot_MNIST_sample(X_noisy) plot_variance_explained(variance_explained_noisy) ``` ## Exercise 6: Denoising Next, use PCA to perform denoising by projecting the noise-corrupted data onto the basis vectors found from the original dataset. By taking the top K components of this projection, we can reduce noise in dimensions orthogonal to the K-dimensional latent space. **Steps:** - Subtract the mean of the noise-corrupted data. - Project the data onto the basis found with the original dataset (`evectors`, not `evectors_noisy`) and take the top $K$ components. - Reconstruct the data as normal, using the top 50 components. - Play around with the amount of noise and K to build intuition. 
``` ################################################################### # Insert your code here to: # Subtract the mean of the noise-corrupted data # Project onto the original basis vectors evectors # Reconstruct the data using the top 50 components # Plot the result ################################################################### X_noisy_mean = ... projX_noisy = ... X_reconstructed = ... # plot_MNIST_reconstruction(X_noisy, X_reconstructed) # to_remove solution X_noisy_mean = np.mean(X_noisy, 0) projX_noisy = np.matmul(X_noisy - X_noisy_mean, evectors) X_reconstructed = reconstruct_data(projX_noisy, evectors, X_noisy_mean, 50) with plt.xkcd(): plot_MNIST_reconstruction(X_noisy, X_reconstructed) ```
github_jupyter
# Quick Start **A tutorial on Renormalized Mutual Information** We describe in detail the implementation of RMI estimation in the very simple case of a Gaussian distribution. Of course, in this case the optimal feature is given by the Principal Component Analysis ``` import numpy as np # parameters of the Gaussian distribution mu = [0,0] sigma = [[1, 0.5],[0.5,2]] # extract the samples N_samples = 100000 samples = np.random.multivariate_normal(mu, sigma, N_samples ) ``` Visualize the distribution with a 2D histogram ``` import matplotlib.pyplot as plt plt.figure() plt.hist2d(*samples.T, bins=100, cmap=plt.cm.binary) plt.gca().set_aspect("equal") plt.xlabel("$x_1$") plt.ylabel("$x_2$") plt.title("$P_x(x)$") plt.show() ``` ## Estimate Renormalized Mutual Information of a feature Now we would like to find a one-dimensional function $f(x_1,x_2)$ to describe this 2d distribution. ### Simplest feature For example, we could consider ignoring one of the variables: ``` def f(x): # feature # shape [N_samples, N_features=1] return x[:,0][...,None] def grad_f(x): # gradient # shape [N_samples, N_features=1, N_x=2] grad_f = np.zeros([len(x),1,2]) grad_f[...,0] = 1 return grad_f def feat_and_grad(x): return f(x), grad_f(x) ``` Let's plot it on top of the distribution ``` # Range of the plot xmin = -4 xmax = 4 # Number of points in the grid N = 100 # We evaluate the feature on a grid x_linspace = np.linspace(xmin, xmax, N) x1_grid, x2_grid = np.meshgrid(x_linspace, x_linspace, indexing='ij') x_points = np.array([x1_grid.flatten(), x2_grid.flatten()]).T feature = f(x_points) gradient = grad_f(x_points) plt.figure() plt.title("Feature contours") plt.xlabel(r"$x_1$") plt.ylabel(r"$x_2$") plt.gca().set_aspect('equal') # Draw the input distribution on the background plt.hist2d(*samples.T, bins=100, cmap=plt.cm.binary) # Draw the contour lines of the extracted feature plt.xlim([-4,4]) plt.ylim([-4,4]) plt.contour(x1_grid, x2_grid, feature.reshape([N,N]),15, linewidths=4, cmap=plt.cm.Blues) plt.colorbar() plt.show() ``` $f(x)=x_1$ is clearly a linear function that ignores $x_2$ and increases in the $x_1$ direction **How much information does it give us on $x$?** If we used common mutual information, it would be $\infty$, because $f$ is a deterministic function, and $H(y|x) = -\log \delta(0)$. Let's estimate the renormalized mutual information ``` import rmi.estimation as inf samples = np.random.multivariate_normal(mu, sigma, N_samples ) feature = f(samples) gradient = grad_f(samples) RMI = inf.RenormalizedMutualInformation(feature, gradient) print("Renormalized Mutual Information (x,f(x)): %2.2f" % RMI) ``` Please note that we perform the plot by calculating the feature on a uniform grid. But, to estimate RMI, the feature should be calculated on x **sampled** from the $x$ distribution. In particular, we have ``` p_y, delta_y = inf.produce_P(feature) entropy = inf.Entropy(p_y, delta_y) fterm = inf.RegTerm(gradient) print("Entropy\t %2.2f" % entropy) print("Fterm\t %2.2f" % fterm) print("Renormalized Mutual Information (x,f(x)): %2.2f" % (entropy + fterm)) ``` Renormalized Mutual Information is the sum of the two terms - Entropy - RegTerm ### Reparametrization invariance Do we gain information if we increase the variance of the feature? For example, let's rescale our feature. 
Clearly the information on $x$ should remain the same ``` scale_factor = 4 feature *= scale_factor gradient *= scale_factor RMI = inf.RenormalizedMutualInformation(feature, gradient) print("Renormalized Mutual Information (x,f(x)): %2.2f" % RMI) p_y, delta_y = inf.produce_P(feature) entropy = inf.Entropy(p_y, delta_y) fterm = inf.RegTerm(gradient) print("Entropy\t %2.2f" % entropy) print("Fterm\t %2.2f" % fterm) ``` Let's try even a non-linear transformation. As long as it is invertible, we will get the same RMI ``` # For example y_lin = np.linspace(-4,4,100) f_lin = y_lin**3 + 5*y_lin plt.figure() plt.title("Reparametrization function") plt.plot(y_lin, f_lin) plt.show() feature_new = feature**3 + 5*feature gradient_new = 3*feature[...,None]**2*gradient +5*gradient# chain rule... RMI = inf.RenormalizedMutualInformation(feature_new, gradient_new, 2000) print("Renormalized Mutual Information (x,f(x)): %2.2f" % RMI) p_y, delta_y = inf.produce_P(feature_new) entropy = inf.Entropy(p_y, delta_y) fterm = inf.RegTerm(gradient_new) print("Entropy\t %2.2f" % entropy) print("Fterm\t %2.2f" % fterm) ``` In this case, we have to increase the number of bins to calculate the Entropy with reasonable accuracy. The reason is that the feature now spans a quite larger range but changes very rapidly in the few bins around zero (but we use a uniform binning when estimating the entropy). ``` plt.hist(feature_new,1000) plt.show() ``` And if we instead appliead a **non-invertible** transformation? The consequence is clear: we will **lose information**. Consider for example: ``` feature_new = feature**2 gradient_new = 2*feature[...,None]*gradient # chain rule... RMI_2 = inf.RenormalizedMutualInformation(feature_new, gradient_new, 2000) print("Renormalized Mutual Information (x,f(x)): %2.2f" % RMI_2) p_y, delta_y = inf.produce_P(feature_new) entropy = inf.Entropy(p_y, delta_y) fterm = inf.RegTerm(gradient_new) print("Entropy\t %2.2f" % entropy) print("Fterm\t %2.2f" % fterm) plt.hist(feature_new,1000) plt.show() ``` The careful observer will be able to guess how much information we have lost in this case: our feature is centered in zero and we squared it. We lose the sign, and on average the half of the samples have one sign and the half the other sign. One bit of information is lost. The difference is $\log 2$! ``` deltaRMI = RMI - RMI_2 print("delta RMI %2.3f" %deltaRMI) print("log 2 = %2.3f" % np.log(2)) ``` ### Another feature Let's take another linear feature, for example, this time in the other direction ``` def f(x): # feature # shape [N_samples, N_features=1] return x[:,1][...,None] def grad_f(x): # gradient # shape [N_samples, N_features=1, N_x=2] grad_f = np.zeros([len(x),1,2]) grad_f[...,1] = 1 return grad_f def feat_and_grad(x): return f(x), grad_f(x) feature = f(x_points) gradient = grad_f(x_points) plt.figure() plt.title("Feature contours") plt.xlabel(r"$x_1$") plt.ylabel(r"$x_2$") plt.gca().set_aspect('equal') # Draw the input distribution on the background samples = np.random.multivariate_normal(mu, sigma, N_samples ) plt.hist2d(*samples.T, bins=100, cmap=plt.cm.binary) # Draw the contour lines of the extracted feature plt.xlim([-4,4]) plt.ylim([-4,4]) plt.contour(x1_grid, x2_grid, feature.reshape([N,N]),15, linewidths=4, cmap=plt.cm.Blues) plt.colorbar() plt.show() feature = f(samples) gradient = grad_f(samples) RMI = inf.RenormalizedMutualInformation(feature, gradient) print("Renormalized Mutual Information (x,f(x)): %2.2f" % RMI) ``` This feature seems to better describe our input. 
This is reasonable: it lies closer to the direction of larger fluctuation of the distribution. What is the best linear feature that we can take? ``` # Let's define a linear feature def linear(x, th): """ linear increasing in the direction given by angle th. Args: x (array_like): [N_samples, 2] array of samples th (float): direction of the feature in which it increases Returns: feature (array_like): [N_samples, 1] feature grad_feature (array_like): [N_samples, 1, N_x] gradient of the feature """ Feature = x[:, 0]*np.cos(th) + x[:, 1]*np.sin(th) Grad1 = np.full(np.shape(x)[0], np.cos(th)) Grad2 = np.full(np.shape(x)[0], np.sin(th)) return Feature, np.array([Grad1, Grad2]).T samples = np.random.multivariate_normal(mu, sigma, N_samples ) th_lin = np.linspace(0,np.pi, 30) rmis = [] for th in th_lin: feature, grad = linear(samples, th) rmi = inf.RenormalizedMutualInformation(feature,grad) rmis.append([th,rmi]) rmis = np.array(rmis) plt.figure() plt.title("Best linear feature") plt.xlabel("$\theta$") plt.ylabel(r"$RMI(x,f_\theta(x))$") plt.plot(rmis[:,0], rmis[:,1]) plt.show() best_theta = th_lin[np.argmax(rmis[:,1])] ``` Let's plot the feature with the parameter that gives the largest Renormalized Mutual Information ``` feature, gradient = linear(x_points,best_theta) plt.figure() plt.title("Feature contours") plt.xlabel(r"$x_1$") plt.ylabel(r"$x_2$") plt.gca().set_aspect('equal') # Draw the input distribution on the background samples = np.random.multivariate_normal(mu, sigma, N_samples ) plt.hist2d(*samples.T, bins=100, cmap=plt.cm.binary) # Draw the contour lines of the extracted feature plt.xlim([-4,4]) plt.ylim([-4,4]) plt.contour(x1_grid, x2_grid, feature.reshape([N,N]),15, linewidths=4, cmap=plt.cm.Blues) plt.colorbar() plt.show() feature, gradient = linear(samples,best_theta) RMI = inf.RenormalizedMutualInformation(feature, gradient) print("Renormalized Mutual Information (x,f(x)): %2.2f" % RMI) ``` This is the same feature that we would get if we considered the first Principal Component of PCA. This is the only case in which this is possible: PCA can only extract linear features, and in particular, since it only takes into account the covariance matrix of the distribution, it can provide the best feature only for a Gaussian (which is identified by its mean and covariance matrix) ``` import rmi.pca samples = np.random.multivariate_normal(mu, sigma, N_samples ) g_pca = rmi.pca.pca(samples,1) eigenv = g_pca.w[0] angle_pca = np.arctan(eigenv[1]/eigenv[0]) feature, gradient = linear(samples,angle_pca) RMI = inf.RenormalizedMutualInformation(feature, gradient) print("Renormalized Mutual Information (x,f(x)): %2.2f" % RMI) print("best found angle %2.2f" %best_theta) print("pca direction %2.2f" %angle_pca) ``` We recall that in this very special case, and as long as the proposed feature is only rotated (without changing the scale), the simple maximization of the Feature Entropy would have given the same result. Again, this only holds for linear features, and in particular for those whose gradient vector is not affected by a change of parameters). As soon as we use a non-linear feature, just looking at the entropy of the feature is not enough anymore - entropy is not reparametrization invariant. Also, given an arbitrary deterministic feature function, RMI is the only quantity that allows to estimate it's dependence with its arguments ## Feature Optimization Let's try now to optimize a neural network to extract a feature. 
In this case, as we already discussed, we will still get a linear feature ``` import rmi.neuralnets as nn # Define the layout of the neural network # The cost function is implicit when choosing the model RMIOptimizer rmi_optimizer = nn.RMIOptimizer( layers=[ nn.K.layers.Dense(30, activation="relu",input_shape=(2,)), nn.K.layers.Dense(1) ]) # Compile the network === choose the optimizer to use during the training rmi_optimizer.compile(optimizer=nn.tf.optimizers.Adam(1e-3)) # Print the table with the structure of the network rmi_optimizer.summary() # Define an objects that handles the training rmi_net = nn.Net(rmi_optimizer) # Perform the training of the neural network batchsize = 1000 N_train = 5000 def get_batch(): return np.random.multivariate_normal(mu, sigma, batchsize) rmi_net.fit_generator(get_batch, N_train) # Plot the training history (value of RMI) # The large fluctuations can be reduced by increasing the batchsize rmi_net.plot_history() ``` Calculate the feature on the input points: just apply the object `rmi_net`! ``` feature = rmi_net(x_points) plt.figure() plt.title("Feature contours") plt.xlabel(r"$x_1$") plt.ylabel(r"$x_2$") plt.gca().set_aspect('equal') # Draw the input distribution on the background samples = np.random.multivariate_normal(mu, sigma, N_samples ) plt.hist2d(*samples.T, bins=100, cmap=plt.cm.binary) # Draw the contour lines of the extracted feature plt.xlim([-4,4]) plt.ylim([-4,4]) plt.contour(x1_grid, x2_grid, feature.reshape([N,N]),15, linewidths=4, cmap=plt.cm.Blues) plt.colorbar() plt.show() ``` To calculate also the gradient of the feature, one can use the function `get_feature_and_grad` ``` feature, gradient = rmi_net.get_feature_and_grad(samples) RMI = inf.RenormalizedMutualInformation(feature, gradient) print("Renormalized Mutual Information (x,f(x)): %2.2f" % RMI) ``` ## Tradeoff between simplicity and compression When optimizing renormalized mutual information to obtain a **meaningful feature** (in the sense of representation learning), one should avoid to employ too powerful networks. A good feature should set a convenient tradeoff between its **"simplicity"** (i.e. number of parameters, or how "smooth" the feature is) and its **information content** (i.e. how much the input space is compressed in a smaller dimension). In other words, useful representations should be "well-behaved", even at the price of reducing their renormalized mutual information. 
We can show this idea in a straight forward example ``` # Let's define a linear feature def cheating_feature(x): Feature = x[:, 0]*np.cos(best_theta) + x[:, 1]*np.sin(best_theta) step_size = 3 step_width = 1/12 step_argument = x[:, 0]*np.cos(best_theta+np.pi/2) + x[:, 1]*np.sin(best_theta+np.pi/2) Feature +=step_size*np.tanh(step_argument/step_width) Grad1 = np.full(x.shape[0], np.cos(best_theta)) Grad2 = np.full(x.shape[0], np.sin(best_theta)) Grad1 += step_size/step_width*np.cos(best_theta+np.pi/2)/np.cosh(step_argument/step_width)**2 Grad2 += step_size/step_width*np.sin(best_theta+np.pi/2)/np.cosh(step_argument/step_width)**2 return Feature, np.array([Grad1, Grad2]).T samples = np.random.multivariate_normal(mu, sigma, N_samples ) feature, gradient = cheating_feature(x_points) plt.figure() plt.title("Feature contours") plt.xlabel(r"$x_1$") plt.ylabel(r"$x_2$") plt.gca().set_aspect('equal') # Draw the input distribution on the background samples = np.random.multivariate_normal(mu, sigma, N_samples ) plt.hist2d(*samples.T, bins=100, cmap=plt.cm.binary) # Draw the contour lines of the extracted feature plt.xlim([-4,4]) plt.ylim([-4,4]) plt.contour(x1_grid, x2_grid, feature.reshape([N,N]),15, linewidths=4, cmap=plt.cm.Blues) plt.colorbar() plt.show() feature, gradient = cheating_feature(samples) RMI = inf.RenormalizedMutualInformation(feature, gradient) print("Renormalized Mutual Information (x,f(x)): %2.2f" % RMI) p_y, delta_y = inf.produce_P(feature) entropy = inf.Entropy(p_y, delta_y) fterm = inf.RegTerm(gradient) print("Entropy\t %2.2f" % entropy) print("Fterm\t %2.2f" % fterm) ``` This feature has a larger mutual information than the linear one. It is still increasing in the direction of largest variance of $x$. However, it contains a _jump_ in the orthogonal direction. This jump allows to encode a "bit" of additional information (about the orthogonal coordinate), allowing to unambiguously distinguish whether $x$ was extracted on the left or right side of the Gaussian. In principle, one can add an arbitrary number of jumps until the missing coordinate can be identified with arbitrary precision. This feature would have an arbitrary high renormalized mutual information (as it should be, since it contains more information on $x$). However, such a non-smooth feature is definitely not useful for feature extraction! One can avoid these extremely compressed representations by encouraging simpler features (like smooth, or a neural network with a limited number of layers for example). ``` # Histogram of the feature # The continuous value of x encodes one coordinate, # the two peaks of the distribution provide additional information # on the second coordinate! plt.hist(feature,1000) plt.show() ``` ## Conclusions This technique can be applied to - estimate the information that a deterministic feature $f(x)$ carries about a (higher-dimensional) $x$ - in other words, to estimate how useful is a given "macroscopic" quantity to describe a system? - extract non-linear representations in an unsupervised way, by optimizinng Renormalized Mutual Information. For more examples: - see the notebooks with the spiral-shaped distribution for an example with a non-Gaussian input distribution - see the Wave Packet and Liquid Drop notebooks for proof-of-concept applications in physics (or in general for higher-dimensional input spaces and extraction of a two-dimensional feature) At the moment, only one-dimensional or two-dimensional features can be extracted with the neural network class. 
This is due to the implementation of the Entropy estimation, which is currently based on a histogram and therefore does not scale well to higher dimensions. An alternative (differentiable) way of estimating the Entropy would allow this technique to be extended to features with more than two dimensions.
github_jupyter
# UPDATE This notebook is no longer being used. Please look at the most recent version, NLSS_V2 found in the same directory. My project looks at the Northernlion Live Super Show, a thrice a week Twitch stream which has been running since 2013. Unlike a video service like Youtube, the live nature of Twitch allows for a more conversational 'live comment stream' to accompany a video. My goal is to gather statistics about the episodes and pair this with a list of all the comments corrosponding to a video. Using this I will attempt to recognize patterns in Twitch comments based on the video statistics. ``` # Every returned Out[] is displayed, not just the last one. from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" import nltk import pandas as pd import numpy as np ``` A text file containing basic information about every NLSS episode must be organized into something usable ``` with open(r'data\NLSS_Dockets.txt') as f: file = f.read() shows = file.split('\n\n') #split into every show shows[:5] ``` This text file was taken from a webpage and so it contains links to Nick's livestream. Let's get rid of this since it's not needed. ``` index = 0 for s in shows: shows[index] = s.replace(' Nick View', '') index+=1 shows[-10:] ``` Now I need to split up each show into their meaningful parts. Let's start with the games played on each episode. ``` games = [] for s in shows: g = s.split('\n') #Text files has games on second line games.append(g[1]) games ``` I'll have to clean these up later to make sure all the games are spelled consistantly. ``` #Number of dockets, not individual games len(games) ``` Now lets take a look at the first lines of the text file which contain the date of the show and the people who joined the show that day. They are seperated in the file by (). ``` date_crew = [] for s in shows: dc = s.split('\n')[0] date_crew.append(dc) print(date_crew) ``` I'm going to use regex to split these up. ``` import re date = [] crew = [] for entry in date_crew: foo = re.search(r'\((.*)\)', entry).group(1) d = foo.split(r')')[0] date.append(d) c = foo.split(r'(')[-1] crew.append(c) ``` Now I'll start creating a data frame of this information ``` date_df = pd.DataFrame(date, columns = ["Date"]) date_df.head() games_df = pd.DataFrame(games, columns = ["Docket"]) games_df.head() crew_df = pd.DataFrame(crew, columns = ["Crew"]) crew_df.head() ``` Now combine them ``` nlss_df = pd.DataFrame() nlss_df['Date'] = date_df['Date'] nlss_df['Crew'] = crew_df['Crew'] nlss_df['Docket'] = games_df['Docket'] nlss_df.head() nlss_df.describe() ``` I noticed that some lines had a link called "(continued)" in the games list. I want to get rid of these. While I'm at it, let's make the games docket contain the games as lists. ``` improved = [] #For each docket for d in nlss_df['Docket']: #Split docket into list of games d = d.split(r',') #For each game for g in d: #If game matches string to remove if g == r" (continued)" or g == r" (Continued)": #Remove game d.remove(g) improved.append(d) nlss_df['Docket'] = improved nlss_df.head() nlss_df['Crew'] ``` I want to split on "w/" so each crew member is individual item. I'm also going to put them into a list. 
``` improved = [] #For each cast of crew for e in nlss_df['Crew']: #Split cast into list of members e = e.split(r',') #For each member for m in e: #If member contains a /w if r'w/' in m: both = m.split(r'w/') e.remove(m) e.extend(both) improved.append(e) improved[:20] ``` Strip extra spaces ``` fullstripped = [] for entry in improved: stripped = [] for member in entry: member = member.strip(' ') stripped.append(member) fullstripped.append(stripped) fullstripped[:10] ``` Let's make the names consistant. Luckily I know the aliases that are used. Let's see what we're working with. ``` names = [] for entry in fullstripped: for user in entry: if user not in names: names.append(user) print(names) ``` Translated: Northernlion, RockLeeSmile, CobaltStreak, AlpacaPatrol, LastGreyWolf, HCJustin, BaerTaffy, JSmithOTI, Sinvicta, DanGheesling, MALF, FlackBlag, TotalBiscuit, LovelyMomo, Blueman, BaerTaffy, MathasGames, Crendor, BananasaurusRex, NOTREAL, AlpacaPatrol, DanGheesling, BananasaurusRex, MALF, Arumba, BaerTaffy, CobaltStreak, MALF, Magresta, Northernlion, JSmithOTI, RockLeeSmile, LovelyMomo, MikeBithell, RedPandaGamer, OhmWrecker, PrescriptionPixel, Green9090 ``` foo = "Northernlion, RockLeeSmile, CobaltStreak, AlpacaPatrol, LastGreyWolf, HCJustin, BaerTaffy, JSmithOTI, Sinvicta, DanGheesling, MALF, FlackBlag, TotalBiscuit, LovelyMomo, Blueman, BaerTaffy, MathasGames, Crendor, BananasaurusRex, NOTREAL, AlpacaPatrol, DanGheesling, NOTREAL, BananasaurusRex, MALF, Arumba, BaerTaffy, CobaltStreak, MALF, Magresta, Northernlion, JSmithOTI, RockLeeSmile, LovelyMomo, MikeBithell, RedPandaGamer, OhmWrecker, PrescriptionPixel, Green9090" translated = foo.split(", ") translated guests = [] for cast in fullstripped: guests.append([translated[names.index(user)] for user in cast]) #Replace first names with second names guests[0] ``` Looking better. Let's swap it into our DF. ``` nlss_df['Crew'] = guests nlss_df.head() ``` # Adding more stats File from https://sullygnome.com/channel/Northernlion/365/streams This version can only go back 365 days. Can we create a column for date that matches nlss_df format? If so, we can combine overlapping stats. I also have a larger CSV which I'm working on in FullCSV.ipynb. I will combine these once formated correctly. ``` import os import glob print(os.getcwd()) allFiles = glob.glob(r"data\*.csv") stream_df = pd.DataFrame() l = [] for foo in allFiles: stream_df = pd.read_csv(foo,index_col=None, header=0) l.append(stream_df) stream_df = pd.concat(l) #stream_df = pd.read_csv(r'StreamStats365.csv') stream_df formatted = [] order = [1,0,2] for date in stream_df['Stream start time']: dmy = date.split(' ')[1:-1] #Date/Month/Year dmy[0] = dmy[0][:-2] #Remove day suffixes mdy = [dmy[i] for i in order] formatted.append(str(mdy[0] + " " + mdy[1] + ", " + mdy[2])) formatted[:15] stream_df["Date"] = formatted stream_df = stream_df.reset_index() stream_df.index = stream_df["index"] stream_df = stream_df.drop('index', axis=1) stream_df.head() ``` There was a day where an extra non-NLSS stream happened. It messes up our ordering so let's remove it. 
``` stream_df[stream_df["Date"]=='January 4, 2017'] stream_df = stream_df[stream_df['Unnamed: 0'] != 0] stream_df[stream_df["Date"]=='January 4, 2017'] combined = nlss_df.merge(stream_df) #drop eronious columns combined = combined.drop('Games', 1) combined = combined.drop('Unnamed: 0', 1) nlss_df.head() combined.head() result = pd.concat([nlss_df, combined], axis=1) #Removes repeat columns result = result.T.groupby(level=0).first().T #Reorder columns result = result[['Date','Crew','Docket','Stream start time','End time','Stream length','Avg Viewers','Peak viewers','Followers gained','Followers per hour','Views','Views per hour']] nlss_df = result nlss_df.loc[50] nlss_df[70:85] nlss_df.loc[nlss_df['Date']=="Wednesday 8th February 2017 22:15"] ``` # Let's Explore Our stats have been compiled. Now let's look around. Which show had most peak viewers? ``` mpv = nlss_df.loc[nlss_df['Peak viewers'].idxmax()] print("Date:", mpv["Date"]) print("Peak viewers:", mpv['Peak viewers']) print("Peak percentage:", (mpv['Peak viewers']/mpv['Views'])*100) print("Total viewers:", mpv['Views']) print("Games:", mpv['Docket']) nlss_df.loc[nlss_df['Peak viewers'].idxmax()] nlss_df.head() ``` # NLSS Dataframe ``` len(nlss_df) nlss_df.head() nlss_df.tail() ``` 8/25/2017 - 2/25/2013 # Current Goals I'm working on an ipynb called FullCSV. I got this by contacting the owner of Sullygnome.com. This CSV goes back further than the ones I've been working with and so will be helpful to use. However, there are differences in how it is formated compared to the CSVs used here. I'm currently working to set the dates up in the same format so I can add in the stats from FullCSV not found here. My new major issue is rather recent. Twitch recently (earlier this week as of writing) [changed their API](https://blog.twitch.tv/the-new-twitch-api-be3fb2b078e6), breaking most existing apps using it. This new update requires new types of authenication. I'm working on learning the new API, however my formerly working Twitch comment downloader is not in working order yet. As far as I can tell, none of the comment downloaders I found on Github currently work, but a few developers are working on updating them to the newest API. I converted a file of Twitch emote commands from Twitch's API site. I will attempt to find user channel specific emote commands and combine it with list. I can then use this master list to search for or omit emotes from analysis. # Sharability of Data I've been in contact with the people who created the NLSS Docket list and the CSVs. They both gave me permission to use and publicly release the data in my project. I will work on organizing a cleaned up folder of my data to publish on Github.
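As a sketch of the planned emote filtering mentioned above (everything here is a placeholder: the emote list and comments are made up, since the comment downloader is not yet working):

```
# Hypothetical example of stripping Twitch emote commands from comments.
emote_commands = {'Kappa', 'PogChamp', 'LUL'}   # master list of emote tokens

comments = ["Kappa that run was great", "LUL LUL", "good docket today"]

def strip_emotes(comment, emotes):
    """Remove emote tokens from a comment, keeping the remaining words."""
    return " ".join(word for word in comment.split() if word not in emotes)

cleaned = [strip_emotes(c, emote_commands) for c in comments]
cleaned  # ['that run was great', '', 'good docket today']
```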
github_jupyter
# Boltzmann Machines This notebook is based on the Udemy course __Deep Learning A-Z™: Hands-On Artificial Neural Networks__. [View Course](https://www.udemy.com/deeplearning/). ## Notebook Information - __notebook name__: `taruma_udemy_boltzmann` - __notebook version/date__: `1.0.0`/`20190730` - __notebook server__: Google Colab - __python version__: `3.6` - __pytorch version__: `1.1.0` ``` #### NOTEBOOK DESCRIPTION from datetime import datetime NOTEBOOK_TITLE = 'taruma_udemy_boltzmann' NOTEBOOK_VERSION = '1.0.0' NOTEBOOK_DATE = 1 # Set 1, if you want to add a date classifier NOTEBOOK_NAME = "{}_{}".format( NOTEBOOK_TITLE, NOTEBOOK_VERSION.replace('.','_') ) PROJECT_NAME = "{}_{}{}".format( NOTEBOOK_TITLE, NOTEBOOK_VERSION.replace('.','_'), "_" + datetime.utcnow().strftime("%Y%m%d_%H%M") if NOTEBOOK_DATE else "" ) print(f"Nama Notebook: {NOTEBOOK_NAME}") print(f"Nama Proyek: {PROJECT_NAME}") #### System Version import sys, torch print("versi python: {}".format(sys.version)) print("versi pytorch: {}".format(torch.__version__)) #### Load Notebook Extensions %load_ext google.colab.data_table #### Download dataset # ref: https://grouplens.org/datasets/movielens/ !wget -O boltzmann.zip "https://sds-platform-private.s3-us-east-2.amazonaws.com/uploads/P16-Boltzmann-Machines.zip" !unzip boltzmann.zip #### Set dataset path DATASET_DIRECTORY = 'Boltzmann_Machines/' def showdata(dataframe): print('Dataframe Size: {}'.format(dataframe.shape)) return dataframe ``` # STEP 1-5 DATA PREPROCESSING ``` # Importing the libraries import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.parallel import torch.optim as optim import torch.utils.data from torch.autograd import Variable movies = pd.read_csv(DATASET_DIRECTORY + 'ml-1m/movies.dat', sep='::', header=None, engine='python', encoding='latin-1') showdata(movies).head(10) users = pd.read_csv(DATASET_DIRECTORY + 'ml-1m/users.dat', sep='::', header=None, engine='python', encoding='latin-1') showdata(users).head(10) ratings = pd.read_csv(DATASET_DIRECTORY + 'ml-1m/ratings.dat', sep='::', header=None, engine='python', encoding='latin-1') showdata(ratings).head(10) # Preparing the training set and the test set training_set = pd.read_csv(DATASET_DIRECTORY + 'ml-100k/u1.base', delimiter='\t') training_set = np.array(training_set, dtype='int') test_set = pd.read_csv(DATASET_DIRECTORY + 'ml-100k/u1.test', delimiter='\t') test_set = np.array(test_set, dtype='int') # Getting the number of users and movies nb_users = int(max(max(training_set[:, 0]), max(test_set[:, 0]))) nb_movies = int(max(max(training_set[:, 1]), max(test_set[:, 1]))) # Converting the data into an array with users in lines and movies in columns def convert(data): new_data = [] for id_users in range(1, nb_users+1): id_movies = data[:, 1][data[:, 0] == id_users] id_ratings = data[:, 2][data[:, 0] == id_users] ratings = np.zeros(nb_movies) ratings[id_movies - 1] = id_ratings new_data.append(list(ratings)) return new_data training_set = convert(training_set) test_set = convert(test_set) # Converting the data into Torch tensors training_set = torch.FloatTensor(training_set) test_set = torch.FloatTensor(test_set) training_set.
``` # STEP 6 ``` # Converting the ratings into binary ratings 1 (Liked) or 0 (Not Liked) training_set[training_set == 0] = -1 training_set[training_set == 1] = 0 training_set[training_set == 2] = 0 training_set[training_set >= 3] = 1 test_set[test_set == 0] = -1 test_set[test_set == 1] = 0 test_set[test_set == 2] = 0 test_set[test_set >= 3] = 1 training_set ``` # STEP 7 - 10 Building RBM Object ``` # Creating the architecture of the Neural Network # nv = number visible nodes, nh = number hidden nodes class RBM(): def __init__(self, nv, nh): self.W = torch.randn(nh, nv) self.a = torch.randn(1, nh) self.b = torch.randn(1, nv) def sample_h(self, x): wx = torch.mm(x, self.W.t()) activation = wx + self.a.expand_as(wx) p_h_given_v = torch.sigmoid(activation) return p_h_given_v, torch.bernoulli(p_h_given_v) def sample_v(self, y): wy = torch.mm(y, self.W) activation = wy + self.b.expand_as(wy) p_v_given_h = torch.sigmoid(activation) return p_v_given_h, torch.bernoulli(p_v_given_h) def train(self, v0, vk, ph0, phk): self.W += (torch.mm(v0.t(), ph0) - torch.mm(vk.t(), phk)).t() self.b += torch.sum((v0 - vk), 0) self.a += torch.sum((ph0 - phk), 0) ``` # STEP 11 ``` nv = len(training_set[0]) nh = 100 batch_size = 100 rbm = RBM(nv, nh) ``` # STEP 12-13 ``` # Training the RBM nb_epochs = 10 for epoch in range(1, nb_epochs + 1): train_loss = 0 s = 0. for id_user in range(0, nb_users - batch_size, batch_size): vk = training_set[id_user:id_user+batch_size] v0 = training_set[id_user:id_user+batch_size] ph0,_ = rbm.sample_h(v0) for k in range(10): _,hk = rbm.sample_h(vk) _,vk = rbm.sample_v(hk) vk[v0<0] = v0[v0<0] phk,_ = rbm.sample_h(vk) rbm.train(v0, vk, ph0, phk) train_loss += torch.mean(torch.abs(v0[v0>=0] - vk[v0>=0])) s += 1. print('epoch: '+str(epoch)+' loss: '+str(train_loss/s)) ``` # STEP 14 ``` # Testing the RBM test_loss = 0 s = 0. for id_user in range(nb_users): v = training_set[id_user:id_user+1] vt = test_set[id_user:id_user+1] if len(vt[vt>=0]) > 0: _,h = rbm.sample_h(v) _,v = rbm.sample_v(h) test_loss += torch.mean(torch.abs(vt[vt>=0] - v[vt>=0])) s += 1. print('test loss: '+str(test_loss/s)) ```
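As an illustrative follow-up (not part of the original course material), the trained RBM can be used to rank unseen movies for a single user by reconstructing that user's visible units with one Gibbs pass. This sketch assumes the `rbm` object and the binarized `training_set` tensor from the cells above.

```
import torch

user_id = 0  # row index into training_set
v = training_set[user_id:user_id + 1]

# One Gibbs pass: visible -> hidden -> visible
_, h = rbm.sample_h(v)
p_v, _ = rbm.sample_v(h)          # p_v holds P(v=1 | h), useful as a score

# Recommend only movies the user has not rated (-1 in the binarized data)
scores = p_v[0].clone()
scores[v[0] >= 0] = -1.0          # mask out movies that were already rated
_, top10 = torch.topk(scores, 10)

# Columns are 0-based, MovieLens ids are 1-based
print("Top-10 movie ids for user", user_id + 1, ":", (top10 + 1).tolist())
```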
github_jupyter
As a demonstration, create an ARMA22 model drawing innovations from three different distributions: a Bernoulli, a normal, and an inverse normal. Then build a keras/tensorflow model for the 1-d scattering transform to create "features", and use these features to classify which innovation distribution was used. ``` from blusky.blusky_models import build_model_1d import matplotlib.pylab as plt import numpy as np from scipy.stats import bernoulli, norm, norminvgauss def arma22(N, alpha, beta, rnd, eps=0.5): inov = rnd.rvs(2*N) x = np.zeros(2*N) # arma22 model for i in range(2,N*2): x[i] = (alpha[0] * x[i-1] + alpha[1]*x[i-2] + beta[0] * inov[i-1] + beta[1] * inov[i-2] + eps * inov[i]) return x[N:] N = 512 k = 10 alpha = [0.99, -0.1] beta = [0.2, 0.0] eps = 1 series = np.zeros((24*k, N)) y = np.zeros(24*k) for i in range(8*k): series[i, :] = arma22(N, alpha, beta, norm(1.0), eps=eps) y[i] = 0 for i in range(8*k, 16*k): series[i, :] = arma22(N, alpha, beta, norminvgauss(1,0.5), eps=eps) y[i] = 1 for i in range(16*k, 24*k): series[i, :] = arma22(N, alpha, beta, bernoulli(0.5), eps=eps)*2 y[i] = 2 plt.plot(series[3*k,:200], '-r') plt.plot(series[8*k,:200]) plt.plot(series[-3*k,:200]) plt.legend(['normal', 'inverse normal', 'bernoulli']) # Hold-out data: k = 8 hodl_series = np.zeros((24*k, N)) hodl_y = np.zeros(24*k) for i in range(8*k): hodl_series[i, :] = arma22(N, alpha, beta, norm(1.0), eps=eps) hodl_y[i] = 0 for i in range(8*k, 16*k): hodl_series[i, :] = arma22(N, alpha, beta, norminvgauss(1,0.5), eps=eps) hodl_y[i] = 1 for i in range(16*k, 24*k): hodl_series[i, :] = arma22(N, alpha, beta, bernoulli(0.5), eps=eps)*2 hodl_y[i] = 2 # hold-out data plt.plot(hodl_series[0,:200], '-r') plt.plot(hodl_series[8*k,:200]) plt.plot(hodl_series[16*k,:200]) plt.legend(['normal', 'inverse normal', 'bernoulli']) ``` The scattering transform reduces the timeseries to a set of features, which we use for classification. The separation between the series is more obvious looking at the log of the features (see below). A support vector machine has an easy time classifying these processes. ``` base_model = build_model_1d(N, 7,6, concatenate=True) result = base_model.predict(hodl_series) plt.semilogy(np.mean(result[:,0,:], axis=0), '-r') plt.semilogy(np.mean(result[8*k:16*k,0,:], axis=0), '-b') plt.semilogy(np.mean(result[16*k:,0,:], axis=0), '-g') from sklearn.svm import SVC from sklearn.metrics import classification_report model = build_model_1d(N, 7, 6, concatenate=True) result = np.log(model.predict(series)) X = result[:,0,:] rdf = SVC() rdf.fit(X,y) hodl_result = np.log(model.predict(hodl_series)) hodl_X = hodl_result[:,0,:] y_pred = rdf.predict(hodl_X) cls1 = classification_report(hodl_y, y_pred) print(cls1) ``` Blusky's build_model_1d creates a regular old keras model, which you can use like any other (think VGG16, etc.). The order (order < J) defines the depth of the network; if you want a deeper network, increase this parameter. Here we attach a set of fully connected layers to classify like we did previously with the SVM. Dropping in a batch normalization here seems to be important for regularizing the problem.
``` from tensorflow.keras import Input, Model import tensorflow.keras.backend as K from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.layers import BatchNormalization, Dense, Flatten, Lambda from tensorflow.keras.utils import to_categorical early_stopping = EarlyStopping(monitor="val_loss", patience=50, verbose=True, restore_best_weights=True) J = 7 order = 6 base_model = build_model_1d(N, J, order, concatenate=True) dnn = Flatten()(base_model.output) # let's add the "log" here like we did above dnn = Lambda(lambda x : K.log(x))(dnn) dnn = BatchNormalization()(dnn) dnn = Dense(32, activation='linear', name='dnn1')(dnn) dnn = Dense(3, activation='softmax', name='softmax')(dnn) deep_model_1 = Model(inputs=base_model.input, outputs=dnn) deep_model_1.compile(optimizer='rmsprop', loss='categorical_crossentropy') history_1 = deep_model_1.fit(series, to_categorical(y), validation_data=(hodl_series, to_categorical(hodl_y)), callbacks=[early_stopping], epochs=200) y_pred = deep_model_1.predict(hodl_series) cls_2 = classification_report(hodl_y, np.argmax(y_pred, axis=1)) base_model.output plt.plot(history_1.history['loss'][-100:]) plt.plot(history_1.history['val_loss'][-100:]) print(cls_2) ```
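To see where the remaining mistakes fall, a confusion matrix on the hold-out predictions is a useful complement to the classification report above. A small sketch, assuming the `y_pred` and `hodl_y` arrays from the previous cell:

```
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

labels = ['normal', 'inverse normal', 'bernoulli']  # class ids 0, 1, 2
cm = confusion_matrix(hodl_y, np.argmax(y_pred, axis=1))

plt.imshow(cm, cmap='Blues')
plt.xticks(range(3), labels, rotation=45)
plt.yticks(range(3), labels)
plt.xlabel('Predicted')
plt.ylabel('True')
for i in range(3):
    for j in range(3):
        plt.text(j, i, cm[i, j], ha='center', va='center')
plt.colorbar()
plt.show()
```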
github_jupyter
# Generating an ROC Curve This notebook is meant to be be an introduction to generating an ROC curve for multi-class prediction problems and the code comes directly from an [Scikit-Learn demo](http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html). Please issue a comment on my Github account if you would like to suggest any changes to this notebook. ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets from sklearn.metrics import roc_curve, auc from sklearn.cross_validation import train_test_split from sklearn.preprocessing import label_binarize from sklearn.multiclass import OneVsRestClassifier from scipy import interp # Import some data to play with iris = datasets.load_iris() X = iris.data y = iris.target # Binarize the output y = label_binarize(y, classes=[0, 1, 2]) n_classes = y.shape[1] # Add noisy features to make the problem harder random_state = np.random.RandomState(0) n_samples, n_features = X.shape X = np.c_[X, random_state.randn(n_samples, 200 * n_features)] # shuffle and split training and test sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5, random_state=0) # Learn to predict each class against the other classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True, random_state=random_state)) y_score = classifier.fit(X_train, y_train).decision_function(X_test) # Compute ROC curve and ROC area for each class fpr = dict() tpr = dict() roc_auc = dict() for i in range(n_classes): fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i]) roc_auc[i] = auc(fpr[i], tpr[i]) # Compute micro-average ROC curve and ROC area fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel()) roc_auc["micro"] = auc(fpr["micro"], tpr["micro"]) ############################################################################## # Plot of a ROC curve for a specific class plt.figure() plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2]) plt.plot([0, 1], [0, 1], 'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic example') plt.legend(loc="lower right") plt.show() ############################################################################## # Plot ROC curves for the multiclass problem # Compute macro-average ROC curve and ROC area # First aggregate all false positive rates all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)])) # Then interpolate all ROC curves at this points mean_tpr = np.zeros_like(all_fpr) for i in range(n_classes): mean_tpr += interp(all_fpr, fpr[i], tpr[i]) # Finally average it and compute AUC mean_tpr /= n_classes fpr["macro"] = all_fpr tpr["macro"] = mean_tpr roc_auc["macro"] = auc(fpr["macro"], tpr["macro"]) # Plot all ROC curves plt.figure() plt.plot(fpr["micro"], tpr["micro"], label='micro-average ROC curve (area = {0:0.2f})' ''.format(roc_auc["micro"]), linewidth=2) plt.plot(fpr["macro"], tpr["macro"], label='macro-average ROC curve (area = {0:0.2f})' ''.format(roc_auc["macro"]), linewidth=2) for i in range(n_classes): plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})' ''.format(i, roc_auc[i])) plt.plot([0, 1], [0, 1], 'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Some extension of Receiver operating characteristic to multi-class') plt.legend(loc="lower right") plt.show() ```
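As a quick cross-check of the micro- and macro-averaged curves above, `roc_auc_score` can compute the averaged AUCs directly from the binarized labels and the decision scores. A short sketch reusing the `y_test` and `y_score` arrays defined above:

```
from sklearn.metrics import roc_auc_score

# Per-class AUCs (one-vs-rest), then the two averaging strategies
print("per-class:", roc_auc_score(y_test, y_score, average=None))
print("macro    :", roc_auc_score(y_test, y_score, average="macro"))
print("micro    :", roc_auc_score(y_test, y_score, average="micro"))
# These should agree (up to interpolation error in the macro curve)
# with the auc() values computed from the fpr/tpr dictionaries above.
```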
github_jupyter
# CH. 8 - Market Basket Analysis ## Activities #### Activity 8.01: Load and Prep Full Online Retail Data ``` import matplotlib.pyplot as plt import mlxtend.frequent_patterns import mlxtend.preprocessing import numpy import pandas online = pandas.read_excel( io="./Online Retail.xlsx", sheet_name="Online Retail", header=0 ) online['IsCPresent'] = ( online['InvoiceNo'] .astype(str) .apply(lambda x: 1 if x.find('C') != -1 else 0) ) online1 = ( online .loc[online["Quantity"] > 0] .loc[online['IsCPresent'] != 1] .loc[:, ["InvoiceNo", "Description"]] .dropna() ) invoice_item_list = [] for num in list(set(online1.InvoiceNo.tolist())): tmp_df = online1.loc[online1['InvoiceNo'] == num] tmp_items = tmp_df.Description.tolist() invoice_item_list.append(tmp_items) online_encoder = mlxtend.preprocessing.TransactionEncoder() online_encoder_array = online_encoder.fit_transform(invoice_item_list) online_encoder_df = pandas.DataFrame( online_encoder_array, columns=online_encoder.columns_ ) ## COL in different order online_encoder_df.loc[ 20125:20135, online_encoder_df.columns.tolist()[100:110] ] ``` #### Activity 8.02: Apriori on the Complete Online Retail Data Set ``` mod_colnames_minsupport = mlxtend.frequent_patterns.apriori( online_encoder_df, min_support=0.01, use_colnames=True ) mod_colnames_minsupport.loc[0:6] mod_colnames_minsupport[ mod_colnames_minsupport['itemsets'] == frozenset( {'10 COLOUR SPACEBOY PEN'} ) ] mod_colnames_minsupport['length'] = ( mod_colnames_minsupport['itemsets'].apply(lambda x: len(x)) ) ## item set order different mod_colnames_minsupport[ (mod_colnames_minsupport['length'] == 2) & (mod_colnames_minsupport['support'] >= 0.02) & (mod_colnames_minsupport['support'] < 0.021) ] mod_colnames_minsupport.hist("support", grid=False, bins=30) plt.xlabel("Support of item") plt.ylabel("Number of items") plt.title("Frequency distribution of Support") plt.show() ``` #### Activity 8.03: Find the Association Rules on the Complete Online Retail Data Set ``` rules = mlxtend.frequent_patterns.association_rules( mod_colnames_minsupport, metric="confidence", min_threshold=0.6, support_only=False ) rules.loc[0:6] print("Number of Associations: {}".format(rules.shape[0])) rules.plot.scatter("support", "confidence", alpha=0.5, marker="*") plt.xlabel("Support") plt.ylabel("Confidence") plt.title("Association Rules") plt.show() rules.hist("lift", grid=False, bins=30) plt.xlabel("Lift of item") plt.ylabel("Number of items") plt.title("Frequency distribution of Lift") plt.show() rules.hist("leverage", grid=False, bins=30) plt.xlabel("Leverage of item") plt.ylabel("Number of items") plt.title("Frequency distribution of Leverage") plt.show() plt.hist(rules[numpy.isfinite(rules['conviction'])].conviction.values, bins = 30) plt.xlabel("Conviction of item") plt.ylabel("Number of items") plt.title("Frequency distribution of Conviction") plt.show() ```
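Beyond plotting metric distributions, it is often handy to pull out the few rules that score well on several metrics at once. A small follow-up sketch using the `rules` dataframe from above; the thresholds are arbitrary examples, not recommendations.

```
# Keep rules that are both reasonably frequent and strongly associated
strong_rules = rules[
    (rules["support"] >= 0.015)
    & (rules["confidence"] >= 0.7)
    & (rules["lift"] >= 2.0)
].sort_values("lift", ascending=False)

# Print the top rules as readable strings
for _, row in strong_rules.head(10).iterrows():
    lhs = ", ".join(row["antecedents"])
    rhs = ", ".join(row["consequents"])
    print(f"{lhs} -> {rhs} (support={row['support']:.3f}, "
          f"confidence={row['confidence']:.2f}, lift={row['lift']:.1f})")
```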
github_jupyter
# Welcome to Jupyter Notebooks! Author: Shelley Knuth Date: 23 August 2019 Purpose: This is a general purpose tutorial to designed to provide basic information about Jupyter notebooks ## Outline 1. General information about notebooks 1. Formatting text in notebooks 1. Formatting mathematics in notebooks 1. Importing graphics 1. Plotting ## General Information about Notebooks ### What is a Jupyter Notebook? It's an interactive web platform that allows one to create and edit live code, add text descriptions, and visualizations in a document that can be easily shared and displayed. ### How to work with a Notebook To run a cell, hit "shift" and "enter" at the same time Don't be alarmed if your notebook runs for awhile - indicated by [*] Sometimes takes awhile ### Different cell types Code, Markdown are the two I use most frequently ### Exercise Write one sentence on what you are planning to do this weekend in a cell. ### Opening, saving notebooks Opening: File -> New Notebook -> Python 3 Saving: File -> Save as -> Save and Checkpoint (Ctrl + S) Printing: File -> Print Preview Download: File -> Download as PDF (or others) ## Keyboard shortcuts Toggle between edit and command mode with Esc and Enter, respectively. Once in command mode: Scroll up and down your cells with your Up and Down keys. Press A or B to insert a new cell above or below the active cell. M will transform the active cell to a Markdown cell. Y will set the active cell to a code cell. D + D (D twice) will delete the active cell. Z will undo cell deletion. Hold Shift and press Up or Down to select multiple cells at once. With multiple cells selected, Shift + M will merge your selection. Ctrl + Shift + -, in edit mode, will split the active cell at the cursor. You can also click and Shift + Click in the margin to the left of your cells to select them. (from https://www.dataquest.io/blog/jupyter-notebook-tutorial/) ## Formatting text in notebooks Jupyter notebooks are __really__ cool! Jupyter notebooks are _really_ cool! Two spaces after text gives you a newline! ### Headings # Jupyter notebooks are really cool! ## Do you know what else is cool? ### Turtles! #### And Bon Jovi! ### Code The best program to use for this is the `grep` command ### Text color and size The sky is <font color = blue, size = 30>blue!</font> Sometimes the <font color = blue>color</font> doesn't turn out <font size=30> WELL</font> ### Indent or list your text > This is how! - This is how! - This is how! 1. This is how! * This is also how! * This is also how! ### Hyperlinks Sometimes copy and paste is just fine too! https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet [I'm an inline-style link](https://www.google.com) [I'm a reference-style link][Arbitrary case-insensitive reference text] [I'm a relative reference to a repository file](../blob/master/LICENSE) [You can use numbers for reference-style link definitions][1] Or leave it empty and use the [link text itself]. URLs and URLs in angle brackets will automatically get turned into links. http://www.example.com or <http://www.example.com> and sometimes example.com (but not on Github, for example). Some text to show that the reference links can follow later. 
[arbitrary case-insensitive reference text]: https://www.mozilla.org [1]: http://slashdot.org [link text itself]: http://www.reddit.com (from https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet) ## Mathematical Equations in Notebooks $F=ma$ This is an equation, $x=y+z$, where $y=10$ and $z=20$ ### Superscripts and Subscripts $y = x^3 + x^2 + 3x$ $F_g = m g$ ### Grouping $6.022\times 10^{23}$ . ### Greek Letters $\pi = 3.1415926$ $\Omega = 10$ $\delta$ ### Special Symbols $\pm$, $\gg$, $\ll$, $\infty$ $i = \sqrt{-1}$ $\int_a^b$ ## Fractions and Derivatives Fractions $\frac{1}{2}$ Derivatives $\frac{dm}{dt}$, $\frac{\partial m}{\partial t}$ ### Matrices $$\begin{matrix} a & b \\ c & d \end{matrix}$$ ### Exercise Write out an equation where the total derivative of x over y is equal to the square root of 10 added to 7/8 pi $\frac{dx}{dy} = \sqrt{10} + \pi$ ### Exercise Write out an equation where x sub j is equal to a 2x2 matrix containing 10, 20, 30, and 40 $x_j = \begin{matrix} 10 & 20 \\ 30 & 40 \end{matrix}$ ## Importing Graphics Easy way: Drag and drop! Or "Edit -> Insert image" when in Markdown Harder ways: Python: ``` from IPython.display import Image Image("bonjovi.jpg") ``` HTML: <img src="bonjovi.jpg"> ## Basic Programming with Python ### Print statements ``` print("Hello, World!") ``` Look at how the input changed (to the left of the cell). Look at the output! (This and several cells from https://www.dataquest.io/blog/jupyter-notebook-tutorial/) Anything run in the kernal persists in the notebook Can run code and import libraries in the cells ### Variables in Python * Variables are not declared * Variables are created at assignment time * Variable type determined implicitly via assignment x=2 Int x=2.0 . Float Z="hello" str (single or double quotes) z=True Boolean Note capital "T" or "F" * Can convert types using conversion functions: int(), float(), str(), bool() * Python is case sensitive * Check variable type using type function (from https://github.com/ResearchComputing/Python_Spring_2019/blob/master/session1_overview/session1_slides.pdf) ``` z=10.0 print('z is: ', type(z) ) x=int(43.4) print(x) ``` Arithmetic in Python respects the order of operations * Addition: + * Subtraction: - * Multiplication: * * Division: / (returns float) * Floor Division: // (returns int or float; rounds down) * Mod: % (3%2 -> 1) * Exponentiation: ** 2**4 -> 16) Can concatenate strings using "+" (from https://github.com/ResearchComputing/Python_Spring_2019/blob/master/session1_overview/session1_slides.pdf) ``` x='hello '+'there' print(x) ``` ### Lists Multiple Values can be grouped into a list ``` - lists - basic plotting with matplotlib - arrays and numpy; doing calculations - plotting using numpy - importing data from csv files mylist=[1, 2, 10] print(mylist) ``` * List elements accessed with [] notation * Element numbering starts at 0 ``` print(mylist[1]) ``` * Lists can contain different variable types ``` mylist=[1, 'two', 10.0] print(mylist) ```
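To round out the list examples, here is a short illustrative cell (not from the original tutorial) showing a few more common list operations:

```
mylist = [1, 'two', 10.0, True]

print(len(mylist))      # number of elements -> 4
print(mylist[0:2])      # slicing returns a new list -> [1, 'two']
print(mylist[-1])       # negative indices count from the end -> True

mylist.append('new')    # add an element to the end
mylist[1] = 2           # lists are mutable: replace an element
print(mylist)           # [1, 2, 10.0, True, 'new']
```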
github_jupyter
<a href="https://colab.research.google.com/github/DingLi23/s2search/blob/pipelining/pipelining/pdp-exp1/pdp-exp1_cslg-rand-5000_plotting.ipynb" target="_blank"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ### Experiment Description Produce PDP for a randomly picked data from cslg. > This notebook is for experiment \<pdp-exp1\> and data sample \<cslg-rand-5000\>. ### Initialization ``` %load_ext autoreload %autoreload 2 import numpy as np, sys, os in_colab = 'google.colab' in sys.modules # fetching code and data(if you are using colab if in_colab: !rm -rf s2search !git clone --branch pipelining https://github.com/youyinnn/s2search.git sys.path.insert(1, './s2search') %cd s2search/pipelining/pdp-exp1/ pic_dir = os.path.join('.', 'plot') if not os.path.exists(pic_dir): os.mkdir(pic_dir) ``` ### Loading data ``` sys.path.insert(1, '../../') import numpy as np, sys, os, pandas as pd from s2search_score_pdp import pdp_based_importance, apply_order sample_name = 'cslg-rand-5000' f_list = ['title', 'abstract', 'venue', 'authors', 'year', 'n_citations'] pdp_xy = {} pdp_metric = pd.DataFrame(columns=['feature_name', 'pdp_range', 'pdp_importance']) for f in f_list: file = os.path.join('.', 'scores', f'{sample_name}_pdp_{f}.npz') if os.path.exists(file): data = np.load(file) sorted_pdp_data = apply_order(data) feature_pdp_data = [np.mean(pdps) for pdps in sorted_pdp_data] pdp_xy[f] = { 'y': feature_pdp_data, 'numerical': True } if f == 'year' or f == 'n_citations': pdp_xy[f]['x'] = np.sort(data['arr_1']) else: pdp_xy[f]['y'] = feature_pdp_data pdp_xy[f]['x'] = list(range(len(feature_pdp_data))) pdp_xy[f]['numerical'] = False pdp_metric.loc[len(pdp_metric.index)] = [f, np.max(feature_pdp_data) - np.min(feature_pdp_data), pdp_based_importance(feature_pdp_data, f)] pdp_xy[f]['weird'] = feature_pdp_data[len(feature_pdp_data) - 1] > 30 print(pdp_metric.sort_values(by=['pdp_importance'], ascending=False)) ``` ### PDP ``` import matplotlib.pyplot as plt categorical_plot_conf = [ { 'xlabel': 'Title', 'ylabel': 'Scores', 'pdp_xy': pdp_xy['title'] }, { 'xlabel': 'Abstract', 'pdp_xy': pdp_xy['abstract'] }, { 'xlabel': 'Authors', 'pdp_xy': pdp_xy['authors'] }, { 'xlabel': 'Venue', 'pdp_xy': pdp_xy['venue'], 'zoom': { 'inset_axes': [0.15, 0.45, 0.47, 0.47], 'x_limit': [4900, 5050], 'y_limit': [-9, 7], 'connects': [True, True, False, False] } }, ] numerical_plot_conf = [ { 'xlabel': 'Year', 'ylabel': 'Scores', 'pdp_xy': pdp_xy['year'] }, { 'xlabel': 'Citation Count', 'pdp_xy': pdp_xy['n_citations'], 'zoom': { 'inset_axes': [0.4, 0.2, 0.47, 0.47], 'x_limit': [-100, 500], 'y_limit': [-7.5, -6.2], 'connects': [True, False, False, True] } } ] def pdp_plot(confs, title): fig, axes = plt.subplots(nrows=1, ncols=len(confs), figsize=(20, 5), dpi=100) subplot_idx = 0 # plt.suptitle(title, fontsize=20, fontweight='bold') # plt.autoscale(False) for conf in confs: axess = axes if len(confs) == 1 else axes[subplot_idx] axess.plot(conf['pdp_xy']['x'], conf['pdp_xy']['y']) axess.grid(alpha = 0.4) if ('ylabel' in conf): axess.set_ylabel(conf.get('ylabel'), fontsize=20, labelpad=10) axess.set_xlabel(conf['xlabel'], fontsize=16, labelpad=10) if not (conf['pdp_xy']['weird']): if (conf['pdp_xy']['numerical']): axess.set_ylim([-9, -6]) pass else: axess.set_ylim([-15, 10]) pass if 'zoom' in conf: axins = axess.inset_axes(conf['zoom']['inset_axes']) axins.plot(conf['pdp_xy']['x'], conf['pdp_xy']['y']) axins.set_xlim(conf['zoom']['x_limit']) axins.set_ylim(conf['zoom']['y_limit']) 
axins.grid(alpha=0.3) rectpatch, connects = axess.indicate_inset_zoom(axins) connects[0].set_visible(conf['zoom']['connects'][0]) connects[1].set_visible(conf['zoom']['connects'][1]) connects[2].set_visible(conf['zoom']['connects'][2]) connects[3].set_visible(conf['zoom']['connects'][3]) subplot_idx += 1 pdp_plot(categorical_plot_conf, "PDPs for four categorical features") plt.savefig(os.path.join('.', 'plot', f'{sample_name}-categorical.png'), facecolor='white', transparent=False, bbox_inches='tight') # second fig pdp_plot(numerical_plot_conf, "PDPs for two numerical features") plt.savefig(os.path.join('.', 'plot', f'{sample_name}-numerical.png'), facecolor='white', transparent=False, bbox_inches='tight') ```
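For readers unfamiliar with how the plotted scores are produced: a partial dependence curve for a feature is simply the model's mean prediction as that feature is swept over a grid while every other feature keeps its observed values. The notebook above loads precomputed s2search scores, but a generic sketch of the idea (with a scikit-learn regressor standing in for the ranker) looks like this:

```
import numpy as np
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor

X, y = make_regression(n_samples=500, n_features=4, random_state=0)
model = RandomForestRegressor(random_state=0).fit(X, y)

def partial_dependence(model, X, feature, grid_size=30):
    """Mean prediction as one feature is swept over a grid of values."""
    grid = np.linspace(X[:, feature].min(), X[:, feature].max(), grid_size)
    pdp = []
    for value in grid:
        X_mod = X.copy()
        X_mod[:, feature] = value      # force every row to this value
        pdp.append(model.predict(X_mod).mean())
    return grid, np.array(pdp)

grid, pdp = partial_dependence(model, X, feature=0)
print(grid[:3], pdp[:3])
```

The max-minus-min range of such a curve is what the `pdp_range` column above reports; `pdp_importance` is computed by the project's own `pdp_based_importance` helper.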
github_jupyter
# Alzhippo Pr0gress ##### Possible Tasks - **Visualizing fibers** passing through ERC and hippo, for both ipsi and contra cxns (4-figs) (GK) - **Dilate hippocampal parcellations**, to cover entire hippocampus by nearest neighbour (JV) - **Voxelwise ERC-to-hippocampal** projections + clustering (Both) ## Visulaizating fibers 1. Plot group average connectome 2. Find representative subject X (i.e. passes visual inspection match to the group) 3. Visualize fibers with parcellation 4. Repeat 3. on dilated parcellation 5. If connections appear more symmetric in 4., regenerate graphs with dilated parcellation ### 1. Plot group average connectome ``` import numpy as np import networkx as nx import nibabel as nib import scipy.stats as stats import matplotlib.pyplot as plt from nilearn import plotting import os import seaborn as sns import pandas %matplotlib notebook def matrixplotter(data, log=True, title="Connectivity between ERC and Hippocampus"): plotdat = np.log(data + 1) if log else data plt.imshow(plotdat) labs = ['ERC-L', 'Hippo-L-noise', 'Hippo-L-tau', 'ERC-R', 'Hippo-R-noise', 'Hippo-R-tau'] plt.xticks(np.arange(0, 6), labs, rotation=40) plt.yticks(np.arange(0, 6), labs) plt.title(title) plt.colorbar() plt.show() avg = np.load('../data/connection_matrix.npy') matrixplotter(np.mean(avg, axis=2)) ``` ### 2. Find representative subject ``` tmp = np.reshape(avg.T, (355, 36)) tmp[0] corrs = np.corrcoef(tmp)[-1] corrs[corrs == 1] = 0 bestfit = int(np.where(corrs == np.max(corrs))[0]) print("Most similar graph: {}".format(bestfit)) dsets = ['../data/graphs/BNU1/combined_erc_hippo_labels/', '../data/graphs/BNU3/', '../data/graphs/HNU1/'] files = [os.path.join(d,f) for d in dsets for f in os.listdir(d)] graph_fname = files[bestfit] gx = nx.read_weighted_edgelist(graph_fname) adjx = np.asarray(nx.adjacency_matrix(gx).todense()) matrixplotter(adjx) print(graph_fname) ``` **N.B.**: The fibers from the subject/session shown above were SCP'd from the following location on Compute Canada's Cedar machine by @gkiar. They are too large for a git repository, but they were downloaded to the `data/fibers/` directory from the root of this project. Please @gkiar him if you'd like access to this file, in lieu of better public storage: > /project/6008063/gkiar/ndmg/connectomics/ndmg-d/HNU1/fibers/sub-0025444_ses-2_dwi_fibers.npz ### 3. Visualize fibers with parcellation Because I don't have VTK/Dipy locally, this was done in Docker with the script in `./code/npz2trackviz.py` and submitted to the scheduler with `./code/npzdriver.sh`. The command to run this in Docker, from the base directory of this project was: docker run -ti \ -v /Users/greg/code/gkiar/alzhippo/data/:/data \ -v /Users/greg/code/gkiar/alzhippo/code/:/proj \ --entrypoint python2.7 \ bids/ndmg:v0.1.0 \ /proj/npz2trackviz.py /data/fibers/sub-0025444_ses-2_dwi_fibers.npz /data/combined_erc_hippo_labels.nii.gz The resulting `.trk` files were viewed locally with [TrackVis](http://www.trackvis.org/) to make the screenshot below.
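For the parcellation-dilation task listed above, one standard recipe is nearest-neighbour label filling: every unlabelled voxel inside a hippocampus mask takes the label of the closest labelled voxel. This is only a rough sketch of that idea (not the project's actual implementation), using `scipy.ndimage.distance_transform_edt` and assuming the parcellation and mask are already loaded as integer arrays:

```
import numpy as np
from scipy.ndimage import distance_transform_edt

def dilate_labels(labels, mask):
    """Fill unlabelled voxels inside `mask` with the nearest existing label."""
    # For every zero (unlabelled) voxel, find the indices of the nearest
    # labelled voxel, then copy that voxel's label.
    _, nearest = distance_transform_edt(labels == 0, return_indices=True)
    filled = labels[tuple(nearest)]
    out = labels.copy()
    grow = (labels == 0) & (mask > 0)   # only grow inside the mask
    out[grow] = filled[grow]
    return out

# Toy 2D example: two seed labels grown to cover a 5x5 mask
labels = np.zeros((5, 5), dtype=int)
labels[0, 0], labels[4, 4] = 1, 2
mask = np.ones((5, 5), dtype=int)
print(dilate_labels(labels, mask))
```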
github_jupyter
# Course introduction ## A. Overview ### Am I ready to take this course? Yes. Probably. Some programming experience will help, but is not required. If you have no programming experience, I strongly encourage you to go through the first handful of modules on the [Codecademy Python course](https://www.codecademy.com/learn/learn-python) as soon as possible. While that course utilizes Python 2, we will be using Python 3 in our course here. BUT...many of the basics are identical between the two versions. There are <b>a lot</b> of online resources that you can use to supplement things we learn in class. Some examples are: * [python.org Tutorial](https://docs.python.org/3/tutorial/index.html) * [Learn Python](https://www.learnpython.org/) * [Google](http://google.com) (Just type in your question and follow the first [stackoverflow](http://stackoverflow.com) link. This is surprisingly effective; do this first.) ### What computational resources do I need for class? You will need a laptop that will provide you access to the course (i.e. internet access) and a Python environment to follow along. ### How is this for geosciences specifically? The goal of this class is to provide information for all fields in geoscience. To that end, I will try to cover topics from geology, geography, atmospheric sciences, and oceanography. Specifically, I will focus on 1D timeseries, and 2D geospatial (i.e., on a map) analysis. If you have any topics you would like to cover, please let me know, and I will do my best to accommodate. ## Class setup ### Class format We will go through course materials during class time. You should bring a computer to class so that you can follow along and participate in exercises. Also, course materials are interactive, so you can learn by running code snippets as we go and asking questions. Much like learning a new spoken language, hands-on coding is one the <b>best</b> ways to learn a new language. ### Course materials The course materials are available in the [class repository](https://github.com/snifflesnrumjum/python4geosciences). They are in the form of [Jupyter notebooks](http://jupyter.org/). More information on notebooks in the next section. You'll do your work either on your own computer, in a Google Colab notebook, or through the VOAL provided by Texas A&M University. To access the VOAL when off campus, you need to first set up a VPN connection. Set this up for your computer by visiting `https://connect.tamu.edu` and follow instructions there. You'll need to sign in with your NetID, and click on the little blue link that says "AnyConnect VPN" if and when you find that "Web-based installation was unsuccessful" to install Cisco AnyConnect (you will no longer use the web-based installer after this). When you open the Cisco application on your computer, you will need to fill in "connect.tamu.edu" in the little box, then use your NetID and university password to connect. Then you can run this application to use your computer as if you are on campus. ### Course textbook There is no textbook for the course. But if you'd like an outside resource, here are three recommendations: 1. Learning Python by Mark Lutz (available electronically through TAMU Library http://library.tamu.edu/) 2. Beginning Python by Magnus Lie Hetland (available electronically through TAMU Library http://library.tamu.edu/) 3. Allen Downey has written a number of books on Python and related scientific subjects. And as a bonus, they are free (digital versions): http://greenteapress.com/wp/. 
In particular you would want to check out Think Python (2nd edition). ## B. Jupyter notebooks This file format makes it easy to seamlessly combine text and code. The text can be plain or formatted with [Markdown](https://daringfireball.net/projects/markdown/). The code can be written in over 40 languages including Python, R, and Scala. Most importantly, the code can be interacted with when the notebook is opened in a local (that is, on your computer) iPython server. Alternatively, it can simply be viewed through a github repository (like [this very notebook](https://github.com/snifflesnrumjum/python4geosciences/blob/master/materials/0_intro.ipynb)) or through [nbviewer](http://nbviewer.ipython.org/). You'll be able to run class materials (in the form of Jupyter notebooks) on your own computer, on Google Colab or the VOAL via your web browser as well as create and work on homework assignments. If you prefer, you are welcome to run Python on your own computer, but you will need to do that mostly on your own. If you go that route, I recommend using Python 3 (which we will be using in class) and a distribution from [Anaconda](https://www.anaconda.com/products/individual). ### Create a new notebook Start up your local notebook server in your new repo and create a new Jupyter notebook from the local server page. ### Choose syntax for a cell Notebooks are built of cells. Cells can have multiple built-in formats including code and Markdown for text. You can select the desired format from a dropdown menu at the top. If you want to type words, use "Markdown"; if you want to write code, choose "code". ### Move between cells To run a given cell, type `[shift-enter]` which active in that cell. You can run all of the cells with Cell > Run all; other variations are available in that drop down menu. ### Homework We'll discuss homework soon and go through details. It will be in the form of Jupyter notebooks and will be submitted through the Canvas LMS. --- The material below is bonus for any students that are interested in using a terminal window on their own computer for running Python. We may go through it in class. ## Command-line interface A command-line interface is a way to interact with your computer using text instead of a Graphical User Interface (GUI), a GUI being visually based with icons etc. We will use these in this class. On a Macintosh or Linux machine, this is a terminal window. On a PC this is often called a command prompt. Here are some commonly-used commands: * `cd [path]`: change directory from current location to [path]. `cd ..` can be used to move up a single directory, and `cd ../..` moves up two directories, etc. * `pwd`: print working directory, as in write out the current location in the terminal window. * `ls`: list files in current directory. `ls -l` list files in long format to include more information, `ls -a` to list all files even those that are usually not shown because the have a `.` in front, `ls -h` to show file sizes in human readable format. Flags can always be combined to use multiple options at once, as in `ls -ah` to show all files in human readable format. * [tab]: Tab completion. You can always push tab in the terminal window to see available options. As you have some letters entered and push tab, the options will be limited to those that fit the pattern you have started. * `mkdir [dirname]`: make directory called dirname. * `rm [filename]`: remove a file called filename. To remove a directory called dirname, use `rm -r [dirname]`. 
## Short git and GitHub tutorial (optional) Class materials are available on a [GitHub](http://github.org) repository. GitHub is a way to share and access code online which has been version-controlled using git. Version control allows changes in code to be tracked over time; this is important for reproducibility, retrieving code in case of accidents, and working on code in groups. Git is one way to version control your code — other methods include subversion (svn), cvs, and mercurial. More information on this is provided below. Remember: you can always google to learn more! Google is an infinite resource that you can ask at any time of the day. Here we summarize a brief overview of how to use git. GitHub has a [cheatsheet](https://education.github.com/git-cheat-sheet-education.pdf) available. To get changes in a file in a local version of a repository tracked, saved, and then shared with the internet (on github), do the following: * `git add` to initially tell the system to track your file and subsequently to tell the system that you want to take into account new changes to the file (you can also add more than one file in this process). Then * `git commit -m [commit note]` to save the changes. Then * `git push` to share the changes with the version of your repository on github. Now you should be able to look at your repo on github and see your updated file there. **GitHub Desktop** After you have made your repository on GitHub (the website), you should clone it to GitHub Desktop (which is on your local machine). (This should be very easy if you are properly signed into your github account in GitHub Desktop.) Then to get your file tracked and pushed to GitHub (the website): * While inspecting the relevant repository, any untracked files or changes to tracked files are shown in the middle window (it is white with horizontal lines). To do the equivalent of `git add`, you should check the box of the file. * To commit your changes, fill out the form at the bottom of the same window. One short window leaves space for a "Summary" of your changes, and if you have more to say you can put it in the "Description" box. * To push your local changes out to GitHub online, use the Sync button on the upper right hand corner of the window. As a side note, this sync button is also how you should pull down changes to a repository you are following (the equivalent of `git pull`). Note that you do not want to have a directory covered by two git repositories. So for file structure for this class, for example, you might want to have one directory for the class ("Python_x89") which contains two version-controlled subdirectories: the course materials (python4geosciences) and your homework repository ("homework"). That will keep everything properly separated. ![XKCD](https://imgs.xkcd.com/comics/git.png) [Git as explained by XKCD](https://xkcd.com/1597/) ### `git status` Type this in a git-monitored subdirectory on your computer to see the status of files that are under git version control and which files are not being monitored. ### `git add` Use this to add local files to your git repository. `git add [filename]`. ### `git commit` Use this to save to your repository local file changes. First you need to add the file with `git add`, then you can `git commit -m [commit message]`, where the commit message is a concise and useful note to explain what changes you have made. 
You may skip the `git add` step if you would like to commit at once all changes that have been made (you can see what changes would be committed by first consulting `git status`) with `git commit -am [commit message]`. The `-a` flag stands for "all" as in commit all changes. ### `git push` To move your local changes to github so that they are saved and also for sharing with others, you need to `git push`. After making whatever commits you want to make, you can run this command to finalize your changes on github. ### `git merge` If changes have been made in multiple places (say, by another person working on the same code base, or by yourself on two different machines) between `git push`es, git will try to merge if possible — which it will do if the changes don't overlap and therefore don't require your input. If the changes do overlap, you will have to merge the two versions of the code together. You probably won't need to do this in this class. ### Get set up with GitHub You'll need an account on github if you don't already have one, and to have git installed on your computer. We will interact with github through a terminal window in class and through the website. You may also download the [GitHub Desktop application](https://desktop.github.com/) to use if you prefer, though we won't be able to help you much with the details of it. ### Create a new repo In your account on the github webpage, click on the Repositories tab and then the green "New" button in the upper right. You'll need to keep this repo public. After creating this new repo, follow the instructions on the next page for quick setup or to create a new repository on the command line. This makes it so that you have the repo both on github and on your local machine. ### Clone a repo In a terminal window in the location you want to save the repo materials, type: `git clone [repo address, e.g. https://github.com/snifflesnrumjum/python4geosciences]`.
github_jupyter
``` import re from Bio import SeqIO from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord from Bio.Alphabet import IUPAC from Bio.SeqFeature import SeqFeature, FeatureLocation #first 6 aas of each domain #from uniprot: NL63 (Q6Q1S2), 229e(P15423), oc43 (P36334), hku1 (Q0ZME7) #nl63 s1 domain definition: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2693060/ s1_domains = {'nl63': 'FFTCNS', '229e': 'CQTTNG', 'oc43': 'AVIGDL', 'hku1': 'AVIGDF'} s2_domains = {'nl63': 'SSDNGI', '229e': 'IIAVQP', 'oc43': 'AITTGY', 'hku1': 'SISASY'} rdrp_domains_start = {'oc43': 'SKDTNF'} rdrp_domains_end = {'oc43': 'RSAVMQ'} def write_gene_reference(gene_seq, gene_id, gene_name, gene_description, cov_type, outfile): gene_record = SeqRecord(gene_seq, id= gene_id, name= gene_name, description= gene_description) source_feature = SeqFeature(FeatureLocation(0, len(gene_seq)), type='source', qualifiers={'organsism':cov_type, "mol_type":"genomic RNA"}) gene_record.features.append(source_feature) cds_feature = SeqFeature(FeatureLocation(0, len(gene_seq)), type='CDS', qualifiers={'translation':gene_seq.translate()}) gene_record.features.append(cds_feature) SeqIO.write(gene_record, outfile, 'genbank') def make_s1_s2_reference(cov): spike_reference = '../'+str(cov)+'/config/'+str(cov)+'_spike_reference.gb' with open(spike_reference, "r") as handle: for record in SeqIO.parse(handle, "genbank"): nt_seq = record.seq aa_seq = record.seq.translate() s1_regex = re.compile(f'{s1_domains[cov]}.*(?={s2_domains[cov]})') s1_aa = s1_regex.search(str(aa_seq)).group() s1_aa_coords = [(aa.start(0), aa.end(0)) for aa in re.finditer(s1_regex, str(aa_seq))][0] s1_nt_coords = [s1_aa_coords[0]*3, s1_aa_coords[1]*3] s1_nt_seq = nt_seq[s1_nt_coords[0]: s1_nt_coords[1]] s2_regex = re.compile(f'{s2_domains[cov]}.*') s2_aa = s2_regex.search(str(aa_seq)).group() s2_aa_coords = [(aa.start(0), aa.end(0)) for aa in re.finditer(s2_regex, str(aa_seq))][0] s2_nt_coords = [s2_aa_coords[0]*3, s2_aa_coords[1]*3] s2_nt_seq = nt_seq[s2_nt_coords[0]: s2_nt_coords[1]] write_gene_reference(s1_nt_seq, record.id, str(cov)+'_S1', 'spike s1 subdomain', cov, '../'+str(cov)+'/config/'+str(cov)+'_s1_reference.gb') write_gene_reference(s2_nt_seq, record.id, str(cov)+'_S2', 'spike s2 subdomain', cov, '../'+str(cov)+'/config/'+str(cov)+'_s2_reference.gb') # covs = ['oc43', '229e', 'nl63', 'hku1'] covs = ['229e'] for cov in covs: make_s1_s2_reference(cov) def make_rdrp_reference(cov): replicase_reference = '../'+str(cov)+'/config/'+str(cov)+'_replicase1ab_reference.gb' with open(replicase_reference, "r") as handle: for record in SeqIO.parse(handle, "genbank"): nt_seq = record.seq aa_seq = record.seq.translate() rdrp_regex = re.compile(f'{rdrp_domains_start[cov]}.*{rdrp_domains_end[cov]}') rdrp_aa = rdrp_regex.search(str(aa_seq)).group() rdrp_aa_coords = [(aa.start(0), aa.end(0)) for aa in re.finditer(rdrp_regex, str(aa_seq))][0] rdrp_nt_coords = [rdrp_aa_coords[0]*3, rdrp_aa_coords[1]*3] rdrp_nt_seq = nt_seq[rdrp_nt_coords[0]: rdrp_nt_coords[1]] write_gene_reference(rdrp_nt_seq, record.id, str(cov)+'_rdrp', 'rna-dependent rna polymerase', cov, '../'+str(cov)+'/config/'+str(cov)+'_rdrp_reference.gb') ```
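The key trick in the functions above is locating a domain by regex on the translated protein and then mapping the amino-acid coordinates back to nucleotide coordinates by multiplying by three. Here is a toy, self-contained illustration of that mapping, using a made-up 30-nt open reading frame rather than a real spike gene:

```
import re
from Bio.Seq import Seq

nt_seq = Seq("ATGGCTGCTTGGAAACCTGATGAAGCTTAA")   # toy ORF, 10 codons
aa_seq = str(nt_seq.translate())
print(aa_seq)                                   # MAAWKPDEA*

# Find a short "domain" by its first amino acids
match = re.search(r"WKP", aa_seq)
aa_start, aa_end = match.start(), match.end()

# Amino-acid coordinates -> nucleotide coordinates
nt_start, nt_end = aa_start * 3, aa_end * 3
domain_nt = nt_seq[nt_start:nt_end]
print(domain_nt, domain_nt.translate())         # TGGAAACCT -> WKP
```

Note that `Bio.Alphabet` (imported in the first cell) was removed in Biopython 1.78+, so on newer installs the IUPAC import may need to be dropped.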
github_jupyter
<h1><center>How to export 🤗 Transformers Models to ONNX?</center></h1> [ONNX](http://onnx.ai/) is an open format for machine learning models. It lets you save your neural network's computation graph in a framework-agnostic way, which can be particularly helpful when deploying deep learning models. Indeed, businesses might have other requirements _(languages, hardware, ...)_ for which the training framework might not be the best suited in inference scenarios. In that context, having a representation of the actual computation graph that can be shared across various business units and workflows within an organization can be a desirable component. Along with the serialization format, ONNX also provides a runtime library which allows efficient and hardware-specific execution of the ONNX graph. This is done through the [onnxruntime](https://microsoft.github.io/onnxruntime/) project, which already includes collaborations with many hardware vendors to seamlessly deploy models on various platforms. Through this notebook we'll walk you through the process of converting a PyTorch or TensorFlow transformers model to [ONNX](http://onnx.ai/) and leveraging [onnxruntime](https://microsoft.github.io/onnxruntime/) to run inference tasks on models from 🤗 __transformers__. ## Exporting 🤗 transformers model to ONNX --- Exporting models _(either PyTorch or TensorFlow)_ is easily achieved through the conversion tool provided as part of the 🤗 __transformers__ repository. Under the hood the process is essentially the following: 1. Allocate the model from transformers (**PyTorch or TensorFlow**) 2. Forward dummy inputs through the model so that **ONNX** can record the set of operations executed 3. Optionally define dynamic axes on input and output tensors 4. Save the graph along with the network parameters ``` import sys !{sys.executable} -m pip install --upgrade git+https://github.com/huggingface/transformers !{sys.executable} -m pip install --upgrade torch==1.6.0+cpu torchvision==0.7.0+cpu -f https://download.pytorch.org/whl/torch_stable.html !{sys.executable} -m pip install --upgrade onnxruntime==1.4.0 !{sys.executable} -m pip install -i https://test.pypi.org/simple/ ort-nightly !{sys.executable} -m pip install --upgrade onnxruntime-tools !rm -rf onnx/ from pathlib import Path from transformers.convert_graph_to_onnx import convert # Handles all the above steps for you convert(framework="pt", model="bert-base-cased", output=Path("onnx/bert-base-cased.onnx"), opset=11) # Tensorflow # convert(framework="tf", model="bert-base-cased", output="onnx/bert-base-cased.onnx", opset=11) ``` ## How to leverage runtime for inference over an ONNX graph --- As mentioned in the introduction, **ONNX** is a serialization format and many side projects can load the saved graph and run the actual computations from it. Here, we'll focus on the official [onnxruntime](https://microsoft.github.io/onnxruntime/). The runtime is implemented in C++ for performance reasons and provides API/bindings for C++, C, C#, Java and Python. In the case of this notebook, we will use the Python API to highlight how to load a serialized **ONNX** graph and run inference workloads on various backends through **onnxruntime**.
**onnxruntime** is available on pypi: - onnxruntime: ONNX + MLAS (Microsoft Linear Algebra Subprograms) - onnxruntime-gpu: ONNX + MLAS + CUDA ``` !pip install transformers onnxruntime-gpu onnx psutil matplotlib ``` ## Preparing for an Inference Session --- Inference is done using a specific backend definition which turns on hardware specific optimizations of the graph. Optimizations are basically of three kinds: - **Constant Folding**: Convert static variables to constants in the graph - **Deadcode Elimination**: Remove nodes never accessed in the graph - **Operator Fusing**: Merge multiple instruction into one (Linear -> ReLU can be fused to be LinearReLU) ONNX Runtime automatically applies most optimizations by setting specific `SessionOptions`. Note:Some of the latest optimizations that are not yet integrated into ONNX Runtime are available in [optimization script](https://github.com/microsoft/onnxruntime/tree/master/onnxruntime/python/tools/transformers) that tunes models for the best performance. ``` # # An optional step unless # # you want to get a model with mixed precision for perf accelartion on newer GPU # # or you are working with Tensorflow(tf.keras) models or pytorch models other than bert # !pip install onnxruntime-tools # from onnxruntime_tools import optimizer # # Mixed precision conversion for bert-base-cased model converted from Pytorch # optimized_model = optimizer.optimize_model("bert-base-cased.onnx", model_type='bert', num_heads=12, hidden_size=768) # optimized_model.convert_model_float32_to_float16() # optimized_model.save_model_to_file("bert-base-cased.onnx") # # optimizations for bert-base-cased model converted from Tensorflow(tf.keras) # optimized_model = optimizer.optimize_model("bert-base-cased.onnx", model_type='bert_keras', num_heads=12, hidden_size=768) # optimized_model.save_model_to_file("bert-base-cased.onnx") # optimize transformer-based models with onnxruntime-tools from onnxruntime_tools import optimizer from onnxruntime_tools.transformers.onnx_model_bert import BertOptimizationOptions # disable embedding layer norm optimization for better model size reduction opt_options = BertOptimizationOptions('bert') opt_options.enable_embed_layer_norm = False opt_model = optimizer.optimize_model( 'onnx/bert-base-cased.onnx', 'bert', num_heads=12, hidden_size=768, optimization_options=opt_options) opt_model.save_model_to_file('bert.opt.onnx') from os import environ from psutil import cpu_count # Constants from the performance optimization available in onnxruntime # It needs to be done before importing onnxruntime environ["OMP_NUM_THREADS"] = str(cpu_count(logical=True)) environ["OMP_WAIT_POLICY"] = 'ACTIVE' from onnxruntime import GraphOptimizationLevel, InferenceSession, SessionOptions, get_all_providers from contextlib import contextmanager from dataclasses import dataclass from time import time from tqdm import trange def create_model_for_provider(model_path: str, provider: str) -> InferenceSession: assert provider in get_all_providers(), f"provider {provider} not found, {get_all_providers()}" # Few properties that might have an impact on performances (provided by MS) options = SessionOptions() options.intra_op_num_threads = 1 options.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL # Load the model as a graph and prepare the CPU backend session = InferenceSession(model_path, options, providers=[provider]) session.disable_fallback() return session @contextmanager def track_infer_time(buffer: [int]): start = time() yield end = time() 
buffer.append(end - start) @dataclass class OnnxInferenceResult: model_inference_time: [int] optimized_model_path: str ``` ## Forwarding through our optimized ONNX model running on CPU --- When the model is loaded for inference over a specific provider, for instance **CPUExecutionProvider** as above, an optimized graph can be saved. This graph will might include various optimizations, and you might be able to see some **higher-level** operations in the graph _(through [Netron](https://github.com/lutzroeder/Netron) for instance)_ such as: - **EmbedLayerNormalization** - **Attention** - **FastGeLU** These operations are an example of the kind of optimization **onnxruntime** is doing, for instance here gathering multiple operations into bigger one _(Operator Fusing)_. ``` from transformers import BertTokenizerFast tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased") cpu_model = create_model_for_provider("onnx/bert-base-cased.onnx", "CPUExecutionProvider") # Inputs are provided through numpy array model_inputs = tokenizer("My name is Bert", return_tensors="pt") inputs_onnx = {k: v.cpu().detach().numpy() for k, v in model_inputs.items()} # Run the model (None = get all the outputs) sequence, pooled = cpu_model.run(None, inputs_onnx) # Print information about outputs print(f"Sequence output: {sequence.shape}, Pooled output: {pooled.shape}") ``` # Benchmarking PyTorch model _Note: PyTorch model benchmark is run on CPU_ ``` from transformers import BertModel PROVIDERS = { ("cpu", "PyTorch CPU"), # Uncomment this line to enable GPU benchmarking # ("cuda:0", "PyTorch GPU") } results = {} for device, label in PROVIDERS: # Move inputs to the correct device model_inputs_on_device = { arg_name: tensor.to(device) for arg_name, tensor in model_inputs.items() } # Add PyTorch to the providers model_pt = BertModel.from_pretrained("bert-base-cased").to(device) for _ in trange(10, desc="Warming up"): model_pt(**model_inputs_on_device) # Compute time_buffer = [] for _ in trange(100, desc=f"Tracking inference time on PyTorch"): with track_infer_time(time_buffer): model_pt(**model_inputs_on_device) # Store the result results[label] = OnnxInferenceResult( time_buffer, None ) ``` ## Benchmarking PyTorch & ONNX on CPU _**Disclamer: results may vary from the actual hardware used to run the model**_ ``` PROVIDERS = { ("CPUExecutionProvider", "ONNX CPU"), # Uncomment this line to enable GPU benchmarking # ("CUDAExecutionProvider", "ONNX GPU") } for provider, label in PROVIDERS: # Create the model with the specified provider model = create_model_for_provider("onnx/bert-base-cased.onnx", provider) # Keep track of the inference time time_buffer = [] # Warm up the model model.run(None, inputs_onnx) # Compute for _ in trange(100, desc=f"Tracking inference time on {provider}"): with track_infer_time(time_buffer): model.run(None, inputs_onnx) # Store the result results[label] = OnnxInferenceResult( time_buffer, model.get_session_options().optimized_model_filepath ) %matplotlib inline import matplotlib import matplotlib.pyplot as plt import numpy as np import os # Compute average inference time + std time_results = {k: np.mean(v.model_inference_time) * 1e3 for k, v in results.items()} time_results_std = np.std([v.model_inference_time for v in results.values()]) * 1000 plt.rcdefaults() fig, ax = plt.subplots(figsize=(16, 12)) ax.set_ylabel("Avg Inference time (ms)") ax.set_title("Average inference time (ms) for each provider") ax.bar(time_results.keys(), time_results.values(), yerr=time_results_std) plt.show() ``` # 
Quantization support from transformers Quantization enables the use of integer (_instead of floating-point_) arithmetic to run neural network models faster. From a high-level point of view, quantization works by mapping the float32 range of values to int8 with as little loss as possible in model performance. Hugging Face provides a conversion tool as part of the transformers repository to easily export quantized models to ONNX Runtime. For more information, please refer to the following: - [Hugging Face Documentation on ONNX Runtime quantization support](https://huggingface.co/transformers/master/serialization.html#quantization) - [Intel's Explanation of Quantization](https://nervanasystems.github.io/distiller/quantization.html) With this method, the accuracy of the model remains at the same level as the full-precision model. If you want to see benchmarks of model performance, we recommend reading the [ONNX Runtime notebook](https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/python/tools/quantization/notebooks/Bert-GLUE_OnnxRuntime_quantization.ipynb) on the subject. # Benchmarking PyTorch quantized model ``` import torch # Quantize model_pt_quantized = torch.quantization.quantize_dynamic( model_pt.to("cpu"), {torch.nn.Linear}, dtype=torch.qint8 ) # Warm up model_pt_quantized(**model_inputs) # Benchmark PyTorch quantized model time_buffer = [] for _ in trange(100): with track_infer_time(time_buffer): model_pt_quantized(**model_inputs) results["PyTorch CPU Quantized"] = OnnxInferenceResult( time_buffer, None ) ``` # Benchmarking ONNX quantized model ``` from transformers.convert_graph_to_onnx import quantize # Transformers allows you to easily convert a float32 model to quantized int8 with ONNX Runtime quantized_model_path = quantize(Path("bert.opt.onnx")) # Then you just have to load it through ONNX Runtime as you would normally do quantized_model = create_model_for_provider(quantized_model_path.as_posix(), "CPUExecutionProvider") # Warm up the overall model to have a fair comparison outputs = quantized_model.run(None, inputs_onnx) # Evaluate performances time_buffer = [] for _ in trange(100, desc=f"Tracking inference time on CPUExecutionProvider with quantized model"): with track_infer_time(time_buffer): outputs = quantized_model.run(None, inputs_onnx) # Store the result results["ONNX CPU Quantized"] = OnnxInferenceResult( time_buffer, quantized_model_path ) ``` ## Show the inference performance of each provider ``` %matplotlib inline import matplotlib import matplotlib.pyplot as plt import numpy as np import os # Compute average inference time + std time_results = {k: np.mean(v.model_inference_time) * 1e3 for k, v in results.items()} time_results_std = np.std([v.model_inference_time for v in results.values()]) * 1000 plt.rcdefaults() fig, ax = plt.subplots(figsize=(16, 12)) ax.set_ylabel("Avg Inference time (ms)") ax.set_title("Average inference time (ms) for each provider") ax.bar(time_results.keys(), time_results.values(), yerr=time_results_std) plt.show() ```
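One sanity check worth running alongside the benchmarks is that the exported graph actually reproduces the PyTorch outputs. Below is a short sketch, assuming the `cpu_model`, `model_pt`, `model_inputs`, and `inputs_onnx` objects defined in the cells above; the tolerance is indicative only.

```
import numpy as np
import torch

# PyTorch reference outputs
model_pt.eval()
with torch.no_grad():
    out = model_pt(**model_inputs)
pt_sequence, pt_pooled = out[0], out[1]

# ONNX Runtime outputs for the same inputs
onnx_sequence, onnx_pooled = cpu_model.run(None, inputs_onnx)

# The float32 export should track PyTorch closely; quantized models will differ more.
np.testing.assert_allclose(pt_sequence.cpu().numpy(), onnx_sequence, atol=1e-3)
np.testing.assert_allclose(pt_pooled.cpu().numpy(), onnx_pooled, atol=1e-3)
print("PyTorch and ONNX Runtime outputs agree within the chosen tolerance")
```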
github_jupyter
# Example 1: Detecting an obvious outlier ``` import numpy as np from isotree import IsolationForest ### Random data from a standard normal distribution np.random.seed(1) n = 100 m = 2 X = np.random.normal(size = (n, m)) ### Will now add obvious outlier point (3, 3) to the data X = np.r_[X, np.array([3, 3]).reshape((1, m))] ### Fit a small isolation forest model iso = IsolationForest(ntrees = 10, ndim = 2, nthreads = 1) iso.fit(X) ### Check which row has the highest outlier score pred = iso.predict(X) print("Point with highest outlier score: ", X[np.argsort(-pred)[0], ]) ``` # Example 2: Plotting outlier and density regions ``` import numpy as np, pandas as pd from isotree import IsolationForest import matplotlib.pyplot as plt from pylab import rcParams %matplotlib inline rcParams['figure.figsize'] = 10, 8 np.random.seed(1) group1 = pd.DataFrame({ "x" : np.random.normal(loc=-1, scale=.4, size = 1000), "y" : np.random.normal(loc=-1, scale=.2, size = 1000), }) group2 = pd.DataFrame({ "x" : np.random.normal(loc=+1, scale=.2, size = 1000), "y" : np.random.normal(loc=+1, scale=.4, size = 1000), }) X = pd.concat([group1, group2], ignore_index=True) ### Now add an obvious outlier which is within the 1d ranges ### (As an interesting test, remove it and see what happens, ### or check how its score changes when using sub-sampling) X = X.append(pd.DataFrame({"x" : [-1], "y" : [1]}), ignore_index = True) ### Single-variable Isolatio Forest iso_simple = IsolationForest(ndim=1, ntrees=100, penalize_range=False, prob_pick_pooled_gain=0) iso_simple.fit(X) ### Extended Isolation Forest iso_ext = IsolationForest(ndim=2, ntrees=100, penalize_range=False, prob_pick_pooled_gain=0) iso_ext.fit(X) ### SCiForest iso_sci = IsolationForest(ndim=2, ntrees=100, ntry=10, penalize_range=True, prob_pick_avg_gain=1, prob_pick_pooled_gain=0) iso_sci.fit(X) ### Fair-Cut Forest iso_fcf = IsolationForest(ndim=2, ntrees=100, penalize_range=False, prob_pick_avg_gain=0, prob_pick_pooled_gain=1) iso_fcf.fit(X) ### Plot as a heatmap pts = np.linspace(-3, 3, 250) space = np.array( np.meshgrid(pts, pts) ).reshape((2, -1)).T Z_sim = iso_simple.predict(space) Z_ext = iso_ext.predict(space) Z_sci = iso_sci.predict(space) Z_fcf = iso_fcf.predict(space) space_index = pd.MultiIndex.from_arrays([space[:, 0], space[:, 1]]) def plot_space(Z, space_index, X): df = pd.DataFrame({"z" : Z}, index = space_index) df = df.unstack() df = df[df.columns.values[::-1]] plt.imshow(df, extent = [-3, 3, -3, 3], cmap = 'hot_r') plt.scatter(x = X['x'], y = X['y'], alpha = .15, c = 'navy') plt.suptitle("Outlier and Density Regions", fontsize = 20) plt.subplot(2, 2, 1) plot_space(Z_sim, space_index, X) plt.title("Isolation Forest", fontsize=15) plt.subplot(2, 2, 2) plot_space(Z_ext, space_index, X) plt.title("Extended Isolation Forest", fontsize=15) plt.subplot(2, 2, 3) plot_space(Z_sci, space_index, X) plt.title("SCiForest", fontsize=15) plt.subplot(2, 2, 4) plot_space(Z_fcf, space_index, X) plt.title("Fair-Cut Forest", fontsize=15) plt.show() print("(Note that the upper-left corner has an outlier point,\n\ and that there is a slight slide in the axes of the heat colors and the points)") ``` # Example 3: calculating pairwise distances ``` import numpy as np, pandas as pd from isotree import IsolationForest from scipy.spatial.distance import cdist ### Generate random multivariate-normal data np.random.seed(1) n = 1000 m = 10 ### This is a random PSD matrix to use as covariance S = np.random.normal(size = (m, m)) S = S.T.dot(S) mu = np.random.normal(size = m, 
scale = 2)
X = np.random.multivariate_normal(mu, S, n)

### Fitting the model
iso = IsolationForest(prob_pick_avg_gain=0, prob_pick_pooled_gain=0)
iso.fit(X)

### Calculate approximate distance
D_sep = iso.predict_distance(X, square_mat = True)

### Compare against other distances
D_euc = cdist(X, X, metric = "euclidean")
D_cos = cdist(X, X, metric = "cosine")
D_mah = cdist(X, X, metric = "mahalanobis")

### Correlations
print("Correlations between different distance metrics")
pd.DataFrame(
    np.corrcoef([D_sep.reshape(-1), D_euc.reshape(-1), D_cos.reshape(-1), D_mah.reshape(-1)]),
    columns = ['SeparationDepth', 'Euclidean', 'Cosine', 'Mahalanobis'],
    index = ['SeparationDepth', 'Euclidean', 'Cosine', 'Mahalanobis']
)
```
# Example 4: imputing missing values
```
import numpy as np
from isotree import IsolationForest

### Generate random multivariate-normal data
np.random.seed(1)
n = 1000
m = 5

### This is a random PSD matrix to use as covariance
S = np.random.normal(size = (m, m))
S = S.T.dot(S)
mu = np.random.normal(size = m)
X = np.random.multivariate_normal(mu, S, n)

### Set some values randomly as missing
values_NA = (np.random.random(size = n * m) <= .15).reshape((n, m))
X_na = X.copy()
X_na[values_NA] = np.nan

### Fitting the model
iso = IsolationForest(build_imputer=True, prob_pick_pooled_gain=1, ntry=10)
iso.fit(X_na)

### Impute missing values
X_imputed = iso.transform(X_na)
print("MSE for imputed values w/model: %f\n" % np.mean((X[values_NA] - X_imputed[values_NA])**2))

### Comparison against simple mean imputation
X_means = np.nanmean(X_na, axis = 0)
X_imp_mean = X_na.copy()
for cl in range(m):
    X_imp_mean[np.isnan(X_imp_mean[:,cl]), cl] = X_means[cl]
print("MSE for imputed values w/means: %f\n" % np.mean((X[values_NA] - X_imp_mean[values_NA])**2))
```
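As a supplementary sketch (not one of the package's own examples): when ground-truth labels happen to exist, the same outlier scores can be summarized with a ranking metric such as ROC AUC. The contamination scheme and the use of scikit-learn below are illustrative assumptions.
```
### Hedged, supplementary sketch: inliers from a tight Gaussian, a few outliers
### from a much wider one, then check how well the scores rank them.
import numpy as np
from isotree import IsolationForest
from sklearn.metrics import roc_auc_score

np.random.seed(1)
n_inliers, n_outliers, m = 1000, 20, 5
X_in  = np.random.normal(size = (n_inliers, m))
X_out = np.random.normal(scale = 5., size = (n_outliers, m))
X = np.r_[X_in, X_out]
y = np.r_[np.zeros(n_inliers), np.ones(n_outliers)]

### Fit and score (higher score = more isolated / outlier-like)
iso = IsolationForest(ntrees = 100, ndim = 2, nthreads = 1)
iso.fit(X)
scores = iso.predict(X)

print("ROC AUC of the outlier scores: %.3f" % roc_auc_score(y, scores))
```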
# 决策树 ----- ``` # 准备工作 # Common imports import numpy as np import os # to make this notebook's output stable across runs np.random.seed(42) # To plot pretty figures %matplotlib inline import matplotlib import matplotlib.pyplot as plt plt.rcParams['axes.labelsize'] = 14 plt.rcParams['xtick.labelsize'] = 12 plt.rcParams['ytick.labelsize'] = 12 # Where to save the figures PROJECT_ROOT_DIR = ".." CHAPTER_ID = "decision_trees" def image_path(fig_id): return os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id) def save_fig(fig_id, tight_layout=True): print("Saving figure", fig_id) if tight_layout: plt.tight_layout() plt.savefig(image_path(fig_id) + ".png", format='png', dpi=300) ``` # 训练与可视化 ``` from sklearn.datasets import load_iris from sklearn.tree import DecisionTreeClassifier iris = load_iris() X = iris.data[:, 2:] # petal length and width y = iris.target tree_clf = DecisionTreeClassifier(max_depth=2, random_state=42) tree_clf.fit(X, y) from sklearn.tree import export_graphviz export_graphviz(tree_clf, out_file=image_path("iris_tree.dot"), feature_names=iris.feature_names[2:], class_names = iris.target_names, rounded=True, filled=True, ) ``` 根据上面得到的dot文件,可以使用`$ dot -Tpng iris_tree.dot -o iris_tree.png `命令转换为图片,如下: ![iris_tree.png](attachment:iris_tree.png) 上图可以看到树是的预测过程。假设想分类鸢尾花, 可以从根节点开始。 首先看花瓣宽度, 如果小于2.45cm, 分入左边节点(深度1,左)。这种情况下,叶子节点不同继续询问,可以直接预测为Setosa鸢尾花。 如果宽度大于2.45cm, 移到右边子节点继续判断。由于不是叶子节点,因此继续判断, 花萼宽度如果小于1.75cm,则很大可能是Versicolor花(深度2, 左)。否则,可能是Virginica花(深度2, 右)。 其中参数含义如下:sample表示训练实例的个数。比如右节点中有100个实例, 花瓣宽度大于2.45cm。(深度1) 其中54个花萼宽度小于1.75cm。value表示实例中每个类别的分分类个数。 gini系数表示实例的杂乱程度。如果等于0, 表示所有训练实例都属于同一个类别。如上setosa花分类。 公式可以计算第i个节点的gini分数。$G_i = 1 - \sum_{k=1}^{n} p_{i,k}^{2}$ P(i,k)表示k实例在i节点中的分布比例。 比如2层左节点的gini等于:$1-(0/50)^{2}-(49/50)^{2}-(5/50)^{2} = 0.168$。 注意:sklearn中使用CART,生成二叉树。但是像ID3可以生成多个孩子的决策树。 ``` from matplotlib.colors import ListedColormap def plot_decision_boundary(clf, X, y, axes=[0, 7.5, 0, 3], iris=True, legend=False, plot_training=True): x1s = np.linspace(axes[0], axes[1], 100) x2s = np.linspace(axes[2], axes[3], 100) x1, x2 = np.meshgrid(x1s, x2s) X_new = np.c_[x1.ravel(), x2.ravel()] y_pred = clf.predict(X_new).reshape(x1.shape) custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0']) plt.contourf(x1, x2, y_pred, alpha=0.3, cmap=custom_cmap, linewidth=10) if not iris: custom_cmap2 = ListedColormap(['#7d7d58','#4c4c7f','#507d50']) plt.contour(x1, x2, y_pred, cmap=custom_cmap2, alpha=0.8) if plot_training: plt.plot(X[:, 0][y==0], X[:, 1][y==0], "yo", label="Iris-Setosa") plt.plot(X[:, 0][y==1], X[:, 1][y==1], "bs", label="Iris-Versicolor") plt.plot(X[:, 0][y==2], X[:, 1][y==2], "g^", label="Iris-Virginica") plt.axis(axes) if iris: plt.xlabel("Petal length", fontsize=14) plt.ylabel("Petal width", fontsize=14) else: plt.xlabel(r"$x_1$", fontsize=18) plt.ylabel(r"$x_2$", fontsize=18, rotation=0) if legend: plt.legend(loc="lower right", fontsize=14) plt.figure(figsize=(8, 4)) plot_decision_boundary(tree_clf, X, y) plt.plot([2.45, 2.45], [0, 3], "k-", linewidth=2) plt.plot([2.45, 7.5], [1.75, 1.75], "k--", linewidth=2) plt.plot([4.95, 4.95], [0, 1.75], "k:", linewidth=2) plt.plot([4.85, 4.85], [1.75, 3], "k:", linewidth=2) plt.text(1.40, 1.0, "Depth=0", fontsize=15) plt.text(3.2, 1.80, "Depth=1", fontsize=13) plt.text(4.05, 0.5, "(Depth=2)", fontsize=11) save_fig("decision_tree_decision_boundaries_plot") plt.show() ``` 上图显示了该决策树的决策边界。垂直线表示决策树的根节点(深度0), 花瓣长度等于2.45cm。 由于左边gini为0,只有一种分类,不再进一步分类判断。但是右边不是很纯,因此深度1的右边节点根据花萼宽度1.75cm进一步判断。 由于最大深度为2,决策树停止后面的判断。但是可以设置max_depth为3, 
然后,两个深度2节点将各自添加另一个决策边界(由虚线表示)。 补充:可以看到决策树的过程容易理解,称之为白盒模型。与之不同的是,随机森林和神经网络一般称为黑盒模型。 它们预测效果很好,可以很容易地检查其计算结果, 来做出这些预测。但却难以解释为什么这样预测。 决策树提供了很好的和简单的分类规则,甚至可以在需要时手动分类。 # 进行预测和计算可能性 ``` tree_clf.predict_proba([[5, 1.5]]) tree_clf.predict([[5, 1.5]]) ``` ### CART:分类回归树 sklearn使用CART算法对训练决策树(增长树)。思想很简单:首先将训练集分为两个子集,根据特征k和阈值$t_k$(比如花瓣长度小于2.45cm)。重要的是怎么选出这个特征。 通过对每一组最纯的子集(k, $t_k$),根据大小的权重进行搜索。最小化如下损失函数: #### CART分类的损失函数 $J(k, t_k) = \frac{m_{left}}{m}G_{left} + \frac{m_{right}}{m}G_{right} $ ![gini.png](attachment:gini.png) 最小化如上函数,一旦成功分为两个子集, 就可以使用相同逻辑递归进行切分。当到达给定的最大深度时(max_depth)停止,或者不能再继续切分(数据集很纯,无法减少杂质)。 如下超参数控制其他的停止条件(min_samples_split, min_sample_leaf, min_weight_fraction_leaf, max_leaf_nodes). ### 计算复杂度 默认的,经常使用gini impurity测量标准,但是也可以使用entropy impuirty来测量。 ![gini_2.png](attachment:gini_2.png) ``` not_widest_versicolor = (X[:, 1] != 1.8) | (y==2) X_tweaked = X[not_widest_versicolor] y_tweaked = y[not_widest_versicolor] tree_clf_tweaked = DecisionTreeClassifier(max_depth=2, random_state=40) tree_clf_tweaked.fit(X_tweaked, y_tweaked) plt.figure(figsize=(8, 4)) plot_decision_boundary(tree_clf_tweaked, X_tweaked, y_tweaked, legend=False) plt.plot([0, 7.5], [0.8, 0.8], "k-", linewidth=2) plt.plot([0, 7.5], [1.75, 1.75], "k-", linewidth=2) plt.text(1.0, 0.9, "Depth=0", fontsize=15) plt.text(1.0, 1.80, "Depth=0", fontsize=13) save_fig("decision_tree_instability_plot") plt.show() ``` ### 限制超参数 如下情况所示,防止过拟合数据,需要限制决策树的自由度,这个过程也叫正则(限制)。 决策树的max_depth超参数来控制拟合程度,默认不限制。可以减少max_depth来限制模型,减少过拟合的风险。 ``` from sklearn.datasets import make_moons Xm, ym = make_moons(n_samples=100, noise=0.25, random_state=53) deep_tree_clf1 = DecisionTreeClassifier(random_state=42) deep_tree_clf2 = DecisionTreeClassifier(min_samples_leaf=4, random_state=42) deep_tree_clf1.fit(Xm, ym) deep_tree_clf2.fit(Xm, ym) plt.figure(figsize=(11, 4)) plt.subplot(121) plot_decision_boundary(deep_tree_clf1, Xm, ym, axes=[-1.5, 2.5, -1, 1.5], iris=False) plt.title("No restrictions", fontsize=16) plt.subplot(122) plot_decision_boundary(deep_tree_clf2, Xm, ym, axes=[-1.5, 2.5, -1, 1.5], iris=False) plt.title("min_samples_leaf = {}".format(deep_tree_clf2.min_samples_leaf), fontsize=14) save_fig("min_samples_leaf_plot") plt.show() ``` DecisionTreeClassifier有如下超参数:min_samples_split表示切分数据时包含的最小实例, min_samples_leaf表示一个叶子节点必须拥有的最小样本数目, min_weight_fraction_leaf(与min_samples_leaf相同,但表示为加权实例总数的一小部分), max_leaf_nodes(最大叶节点数)和max_features(在每个节点上分配的最大特性数), 增加min_*超参数或减少max_*超参数将使模型规范化。 其他算法开始时对决策树进行无约束训练,之后删除没必要的特征,称为减枝。 如果一个节点的所有子节点所提供的纯度改善没有统计学意义,则认为其和其子节点是不必要的。 ``` angle = np.pi / 180 * 20 rotation_maxtrix = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]) Xr = X.dot(rotation_maxtrix) tree_clf_r = DecisionTreeClassifier(random_state=42) tree_clf_r.fit(Xr, y) plt.figure(figsize=(8, 3)) plot_decision_boundary(tree_clf_r, Xr, y, axes=[0.5, 7.5, -1.0, 1], iris=False) plt.show() ``` # 不稳定性 目前为止,决策树有很多好处:它们易于理解和解释,易于使用,用途广泛,而且功能强大。 但是也有一些限制。首先, 决策树喜欢正交决策边界(所有的分割都垂直于一个轴),这使得它们对训练集的旋转很敏感。如下右图所示,旋转45度之后,尽管分类的很好,但是不会得到更大推广。其中的一种解决办法是PCA(后面介绍)。 更普遍的,决策树对训练集中的微小变化很敏感。比如上图中移除一个实例的分类结果又很大的不同。 随机森林可以通过对许多树进行平均预测来限制这种不稳定, 对异常值,微小变化更加适用。 ``` np.random.seed(6) Xs = np.random.rand(100, 2) - 0.5 ys = (Xs[:, 0] > 0).astype(np.float32) * 2 angle = np.pi / 4 rotation_matrix = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]) Xsr = Xs.dot(rotation_matrix) tree_clf_s = DecisionTreeClassifier(random_state=42) tree_clf_s.fit(Xs, ys) tree_clf_sr = DecisionTreeClassifier(random_state=42) tree_clf_sr.fit(Xsr, ys) 
plt.figure(figsize=(11, 4)) plt.subplot(121) plot_decision_boundary(tree_clf_s, Xs, ys, axes=[-0.7, 0.7, -0.7, 0.7], iris=False) plt.subplot(122) plot_decision_boundary(tree_clf_sr, Xsr, ys, axes=[-0.7, 0.7, -0.7, 0.7], iris=False) save_fig("sensitivity_to_rotation_plot") plt.show() ``` ### 回归树 ``` import numpy as np # 带噪声的2阶训练集 np.random.seed(42) m = 200 X = np.random.rand(m ,1) y = 4 * (X - 0.5) ** 2 y = y + np.random.randn(m, 1) / 10 from sklearn.tree import DecisionTreeRegressor tree_reg = DecisionTreeRegressor(max_depth=2, random_state=42) tree_reg.fit(X, y) ``` 该回归决策树最大深度为2, dot后如下: ![regression_tree.png](attachment:regression_tree.png) 与分类树非常类似。 主要的不同在于,分类树根据每个节点预测每个分类。 比如当x1 = 0.6时进行预测。从根开始遍历树,最终到达叶节点,该节点预测值=0.1106。 这个预测仅仅是与此叶节点相关的110个训练实例的平均目标值。这个预测的结果是一个平均平方误差(MSE),在这110个实例中等于0.0151。 请注意,每个区域的预测值始终是该区域实例的平均目标值。该算法以一种使大多数训练实例尽可能接近预测值的方式来分割每个区域。 ``` from sklearn.tree import DecisionTreeRegressor tree_reg1 = DecisionTreeRegressor(random_state=42, max_depth=2) tree_reg2 = DecisionTreeRegressor(random_state=42, max_depth=3) tree_reg1.fit(X, y) tree_reg2.fit(X, y) def plot_regression_predictions(tree_reg, X, y, axes=[0, 1, -0.2, 1], ylabel="$y$"): x1 = np.linspace(axes[0], axes[1], 500).reshape(-1, 1) y_pred = tree_reg.predict(x1) plt.axis(axes) if ylabel: plt.ylabel(ylabel, fontsize=18, rotation=0) plt.plot(X, y, "b.") plt.plot(x1, y_pred, "r.-", linewidth=2, label=r"$\hat{y}$") plt.figure(figsize=(11, 4)) plt.subplot(121) plot_regression_predictions(tree_reg1, X, y) for split, style in ((0.1973, "k-"), (0.0917, "k--"), (0.7718, "k--")): plt.plot([split, split], [-0.2, 1], style, linewidth=2) plt.text(0.21, 0.65, "Depth=0", fontsize=15) plt.text(0.01, 0.2, "Depth=1", fontsize=13) plt.text(0.65, 0.8, "Depth=1", fontsize=13) plt.legend(loc="upper center", fontsize=18) plt.title("max_depth=2", fontsize=14) plt.subplot(122) plot_regression_predictions(tree_reg2, X, y, ylabel=None) for split, style in ((0.1973, "k-"), (0.0917, "k--"), (0.7718, "k--")): plt.plot([split, split], [-0.2, 1], style, linewidth=2) for split in (0.0458, 0.1298, 0.2873, 0.9040): plt.plot([split, split], [-0.2, 1], "k:", linewidth=1) plt.text(0.3, 0.5, "Depth=2", fontsize=13) plt.title("max_depth=3", fontsize=14) save_fig("tree_regression_plot") plt.show() # 画出分类图 export_graphviz( tree_reg1, out_file=image_path("regression_tree.dot"), feature_names=["x1"], rounded=True, filled=True ) tree_reg1 = DecisionTreeRegressor(random_state=42) tree_reg2 = DecisionTreeRegressor(random_state=42, min_samples_leaf=10) tree_reg1.fit(X, y) tree_reg2.fit(X, y) x1 = np.linspace(0, 1, 500).reshape(-1, 1) y_pred1 = tree_reg1.predict(x1) y_pred2 = tree_reg2.predict(x1) plt.figure(figsize=(11, 4)) plt.subplot(121) plt.plot(X, y, "b.") plt.plot(x1, y_pred1, "r.-", linewidth=2, label=r"$\hat{y}$") plt.axis([0, 1, -0.2, 1.1]) plt.xlabel("$x_1$", fontsize=18) plt.ylabel("$y$", fontsize=18, rotation=0) plt.legend(loc="upper center", fontsize=18) plt.title("No restrictions", fontsize=14) plt.subplot(122) plt.plot(X, y, "b.") plt.plot(x1, y_pred2, "r.-", linewidth=2, label=r"$\hat{y}$") plt.axis([0, 1, -0.2, 1.1]) plt.xlabel("$x_1$", fontsize=18) plt.title("min_samples_leaf={}".format(tree_reg2.min_samples_leaf), fontsize=14) save_fig("tree_regression_regularization_plot") ``` ![regression_tree.png](attachment:regression_tree.png) 如上图所示, 回归树根据最小化mse来切分数据集。 决策树在处理回归任务时倾向于过度拟合。 如上左图,超参数默认,不加约束时容易过拟合。通过设置min_samples_leaf 将模型更合理的约束。 # 课后习题 #### 1. 无约束情况下,一百万个实例的训练集训练得到的决策树的大约深度是多少? #### 2. 节点的gini impurity一般是小于还是大于其父节点?一般情况下这样,还是一直都这样? #### 3. 
如果决策树过拟合, 减少max_depth是一个好方法吗? #### 4. 如果决策树欠拟合,缩放输入的特征是一个好方法吗? #### 5. 如果在一个包含100万个实例的训练集上训练决策树需要一个小时,那么在包含1000万个实例的训练集上训练另一个决策树需要花费多少时间呢? #### 6. 如果您的训练集包含100,000个实例,将设置presort=True 可以加快训练吗? #### 7. 训练并调节决策树模型,使用moons数据集。 a. 使用 make_moons(n_samples=10000, noise=0.4)生成数据集。 b. 使用train_test_split(). 切分数据集。 c. 使用网格搜索并进行交叉验证,去找到最合适的超参数。尝试max_leaf_nodes参数。 d. 使用全部数据进行训练,并在测试集上估计性能。应该在85到87之间。 #### 8. 生成森林。 a. 继续上一题, 生成训练集的1000个子集所谓验证集。随机选择100实例。 b. 每一个子集训练一棵树,使用上述得到的最合适超参数。在测试集上评估这1000棵树。 由于它们是在较小的集合上进行训练的,所以这些决策树可能比第一个决策树更糟糕,只实现了大约80%的精度。 c. 对于每个测试集实例,生成1,000个决策树的预测,并且只保留最频繁的预测(您可以使用SciPy的mode()函数来实现这一点)。这给了对测试集的多数投票预测. d. 评估测试集的这些预测:您应该获得比第一个模型稍高的精度(大约高0.5到1.5%)。恭喜你,你训练了一个随机森林分类器!
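A possible starting point for exercise 7 is sketched below. The parameter grid, split sizes, and random seeds are illustrative choices, not a reference solution.
```
# Minimal sketch for exercise 7 (moons dataset + grid search); values are illustrative.
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score

# a) generate the data
Xm, ym = make_moons(n_samples=10000, noise=0.4, random_state=42)

# b) split into train / test sets
X_train, X_test, y_train, y_test = train_test_split(Xm, ym, test_size=0.2, random_state=42)

# c) cross-validated grid search over max_leaf_nodes (plus one other regularization knob)
params = {"max_leaf_nodes": list(range(2, 100)), "min_samples_split": [2, 3, 4]}
grid_search = GridSearchCV(DecisionTreeClassifier(random_state=42), params, cv=3)
grid_search.fit(X_train, y_train)

# d) evaluate the best estimator on the held-out test set (accuracy should land roughly in 0.85-0.87)
y_pred = grid_search.best_estimator_.predict(X_test)
print(accuracy_score(y_test, y_pred))
```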
``` %matplotlib inline ``` Creating Extensions Using numpy and scipy ========================================= **Author**: `Adam Paszke <https://github.com/apaszke>`_ **Updated by**: `Adam Dziedzic <https://github.com/adam-dziedzic>`_ In this tutorial, we shall go through two tasks: 1. Create a neural network layer with no parameters. - This calls into **numpy** as part of its implementation 2. Create a neural network layer that has learnable weights - This calls into **SciPy** as part of its implementation ``` import torch from torch.autograd import Function ``` Parameter-less example ---------------------- This layer doesn’t particularly do anything useful or mathematically correct. It is aptly named BadFFTFunction **Layer Implementation** ``` from numpy.fft import rfft2, irfft2 class BadFFTFunction(Function): @staticmethod def forward(ctx, input): numpy_input = input.detach().numpy() result = abs(rfft2(numpy_input)) return input.new(result) @staticmethod def backward(ctx, grad_output): numpy_go = grad_output.numpy() result = irfft2(numpy_go) return grad_output.new(result) # since this layer does not have any parameters, we can # simply declare this as a function, rather than as an nn.Module class def incorrect_fft(input): return BadFFTFunction.apply(input) ``` **Example usage of the created layer:** ``` input = torch.randn(8, 8, requires_grad=True) result = incorrect_fft(input) print(result) result.backward(torch.randn(result.size())) print(input) ``` Parametrized example -------------------- In deep learning literature, this layer is confusingly referred to as convolution while the actual operation is cross-correlation (the only difference is that filter is flipped for convolution, which is not the case for cross-correlation). Implementation of a layer with learnable weights, where cross-correlation has a filter (kernel) that represents weights. The backward pass computes the gradient wrt the input and the gradient wrt the filter. 
``` from numpy import flip import numpy as np from scipy.signal import convolve2d, correlate2d from torch.nn.modules.module import Module from torch.nn.parameter import Parameter class ScipyConv2dFunction(Function): @staticmethod def forward(ctx, input, filter, bias): # detach so we can cast to NumPy input, filter, bias = input.detach(), filter.detach(), bias.detach() result = correlate2d(input.numpy(), filter.numpy(), mode='valid') result += bias.numpy() ctx.save_for_backward(input, filter, bias) return torch.as_tensor(result, dtype=input.dtype) @staticmethod def backward(ctx, grad_output): grad_output = grad_output.detach() input, filter, bias = ctx.saved_tensors grad_output = grad_output.numpy() grad_bias = np.sum(grad_output, keepdims=True) grad_input = convolve2d(grad_output, filter.numpy(), mode='full') # the previous line can be expressed equivalently as: # grad_input = correlate2d(grad_output, flip(flip(filter.numpy(), axis=0), axis=1), mode='full') grad_filter = correlate2d(input.numpy(), grad_output, mode='valid') return torch.from_numpy(grad_input), torch.from_numpy(grad_filter).to(torch.float), torch.from_numpy(grad_bias).to(torch.float) class ScipyConv2d(Module): def __init__(self, filter_width, filter_height): super(ScipyConv2d, self).__init__() self.filter = Parameter(torch.randn(filter_width, filter_height)) self.bias = Parameter(torch.randn(1, 1)) def forward(self, input): return ScipyConv2dFunction.apply(input, self.filter, self.bias) ``` **Example usage:** ``` module = ScipyConv2d(3, 3) print("Filter and bias: ", list(module.parameters())) input = torch.randn(10, 10, requires_grad=True) output = module(input) print("Output from the convolution: ", output) output.backward(torch.randn(8, 8)) print("Gradient for the input map: ", input.grad) ``` **Check the gradients:** ``` from torch.autograd.gradcheck import gradcheck moduleConv = ScipyConv2d(3, 3) input = [torch.randn(20, 20, dtype=torch.double, requires_grad=True)] test = gradcheck(moduleConv, input, eps=1e-6, atol=1e-4) print("Are the gradients correct: ", test) ```
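**A mathematically correct parameter-less example (supplementary sketch):** as a complement to the layers above, the same `Function` pattern can be sanity-checked on an operation whose backward is known in closed form. The `NumpyExpFunction` below is not part of the tutorial; it only illustrates `gradcheck` passing when the NumPy-backed backward is analytically correct.
```
import numpy as np
import torch
from torch.autograd import Function
from torch.autograd.gradcheck import gradcheck

class NumpyExpFunction(Function):
    """Element-wise exp computed in NumPy, with an analytically correct backward."""

    @staticmethod
    def forward(ctx, input):
        result_np = np.exp(input.detach().numpy())
        result = torch.from_numpy(result_np).to(input.dtype)
        ctx.save_for_backward(result)   # d/dx exp(x) = exp(x), so caching the output is enough
        return result

    @staticmethod
    def backward(ctx, grad_output):
        result, = ctx.saved_tensors
        return grad_output * result

def numpy_exp(input):
    return NumpyExpFunction.apply(input)

# gradcheck compares the analytic backward against finite differences (double precision needed)
x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
print("Are the gradients correct: ", gradcheck(numpy_exp, (x,), eps=1e-6, atol=1e-4))
```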
##### Copyright 2018 The TensorFlow Authors. Licensed under the Apache License, Version 2.0 (the "License"); ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" } # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # TensorFlow 2.0 での tf.function と AutoGraph <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/beta/guide/autograph"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/ja/beta/guide/autograph.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/ja/beta/guide/autograph.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> Note: これらのドキュメントは私たちTensorFlowコミュニティが翻訳したものです。コミュニティによる 翻訳は**ベストエフォート**であるため、この翻訳が正確であることや[英語の公式ドキュメント](https://www.tensorflow.org/?hl=en)の 最新の状態を反映したものであることを保証することはできません。 この翻訳の品質を向上させるためのご意見をお持ちの方は、GitHubリポジトリ[tensorflow/docs](https://github.com/tensorflow/docs)にプルリクエストをお送りください。 コミュニティによる翻訳やレビューに参加していただける方は、 [docs-ja@tensorflow.org メーリングリスト](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ja)にご連絡ください。 TensorFlow 2.0 では Eager Execution の使いやすさとTensorFlow 1.0 のパワーとを同時に提供します。この統合の中核となるのは `tf.function` です。これは Python の構文のサブセットを移植可能でハイパフォーマンスな TensorFlow のグラフに変換します。 `tf.function`の魅力的な特徴に AutoGraph があります。これはグラフを Python の構文そのものを用いて記述できるようにします。 AutoGraph で利用可能な Python の機能の一覧は、[AutoGraph Capabilities and Limitations (Autograph の性能と制限事項)](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/LIMITATIONS.md) で確認できます。また、`tf.function`の詳細については RFC [TF 2.0: Functions, not Sessions](https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md) を参照してください。AutoGraph の詳細については `tf.autograph` を参照してください。 このチュートリアルでは `tf.function` と AutoGraph の基本的な特徴についてひととおり確認します。 ## セットアップ TensorFlow 2.0 Preview Nightly をインポートして、TF 2.0 モードを有効にしてください。 ``` from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np !pip install tensorflow==2.0.0-beta1 import tensorflow as tf ``` ## `tf.function` デコレータ `tf.function`を用いてある関数にアノテーションを付けたとしても、一般の関数と変わらずに呼び出せます。一方、実行時にはその関数はグラフへとコンパイルされます。これにより、より高速な実行や、 GPU や TPU での実行、SavedModel へのエクスポートといった利点が得られます。 ``` @tf.function def simple_nn_layer(x, y): return tf.nn.relu(tf.matmul(x, y)) x = tf.random.uniform((3, 3)) y = tf.random.uniform((3, 3)) simple_nn_layer(x, y) ``` アノテーションの結果を調べてみると、 TensorFlow ランタイムとのやり取りのすべてを処理する特別な呼び出し可能オブジェクトを確認できます。 ``` simple_nn_layer ``` 記述したコードで複数の関数を利用していたとしても、すべての関数にアノテーションを付ける必要はありません。アノテーションをつけた関数から呼び出されるすべての関数は、グラフモードで実行されます。 ``` def linear_layer(x): return 2 * x + 1 @tf.function def deep_net(x): return tf.nn.relu(linear_layer(x)) deep_net(tf.constant((1, 2, 3))) ``` グラフが大量の軽量な演算から構成される場合、関数は Eager Execution で実行するコードよりも高速になる場合があります。しかし、 graph が少量の 
(畳み込み演算のような) 計算に時間のかかる演算からなる場合、高速化はそれほど見込めないでしょう。 ``` import timeit conv_layer = tf.keras.layers.Conv2D(100, 3) @tf.function def conv_fn(image): return conv_layer(image) image = tf.zeros([1, 200, 200, 100]) # warm up conv_layer(image); conv_fn(image) print("Eager conv:", timeit.timeit(lambda: conv_layer(image), number=10)) print("Function conv:", timeit.timeit(lambda: conv_fn(image), number=10)) print("Note how there's not much difference in performance for convolutions") lstm_cell = tf.keras.layers.LSTMCell(10) @tf.function def lstm_fn(input, state): return lstm_cell(input, state) input = tf.zeros([10, 10]) state = [tf.zeros([10, 10])] * 2 # warm up lstm_cell(input, state); lstm_fn(input, state) print("eager lstm:", timeit.timeit(lambda: lstm_cell(input, state), number=10)) print("function lstm:", timeit.timeit(lambda: lstm_fn(input, state), number=10)) ``` ## Python の制御フローの利用 `tf.function`の内部でデータに依存した制御フローを用いる場合、Pythonの制御フロー構文を用いることができます。AutoGraph はそれらの構文を TensorFlow の Ops に書き換えます。たとえば、 `Tensor` に依存する `if` 文は、`tf.cond()` に変換されます。 次の例では `x` は `Tensor` です。ですが、 `if` 文は期待するどおりに動作しています。 ``` @tf.function def square_if_positive(x): if x > 0: x = x * x else: x = 0 return x print('square_if_positive(2) = {}'.format(square_if_positive(tf.constant(2)))) print('square_if_positive(-2) = {}'.format(square_if_positive(tf.constant(-2)))) ``` Note: この例ではスカラー値を用いた単純な条件を用いています。実利用する場合、典型的には<a href="#batching">バッチ処理</a>が用いられます。 AutoGraph は `while`, `for`, `if`, `break`, `continue`, `return` といった典型的なPythonの構文をサポートしています。また、これらを入れ子にして利用する場合もサポートしています。つまり、`Tensor`を返す式を`while` 文や `if` 文の条件式として用いることが可能です。また、`for` 文で `Tensor` の要素に渡って反復することも可能です。 ``` @tf.function def sum_even(items): s = 0 for c in items: if c % 2 > 0: print(c) continue s += c return s sum_even(tf.constant([10, 12, 15, 20])) ``` より高度な使い方をするユーザーのために、AutoGraph は低レベルAPIも提供しています。次の例では AutoGraph が生成したコードを確認できます。 ``` print(tf.autograph.to_code(sum_even.python_function)) ``` 次はより複雑な制御フローの例です。 ``` @tf.function def fizzbuzz(n): msg = tf.constant('') for i in tf.range(n): if tf.equal(i % 3, 0): tf.print('Fizz') elif tf.equal(i % 5, 0): tf.print('Buzz') else: tf.print(i) fizzbuzz(tf.constant(15)) ``` ## Keras での AutoGraph の利用 `tf.function` はオブジェクトのメソッドに対しても利用できます。たとえば、カスタムしたKeras モデルにデコレーターを適用できます、典型的には `call` 関数にアノテーションを付けることで実現できるでしょう。より詳細が必要な場合、`tf.keras` を確認してください。 ``` class CustomModel(tf.keras.models.Model): @tf.function def call(self, input_data): if tf.reduce_mean(input_data) > 0: return input_data else: return input_data // 2 model = CustomModel() model(tf.constant([-2, -4])) ``` ## 副作用 Eager モードのように、通常の場合 `tf.function` の中で、`tf.assign` や `tf.print` といった副作用のある命令を実行できます。また、実行時の順序を保つために、処理順について必要な依存関係を書き加えます。 ``` v = tf.Variable(5) @tf.function def find_next_odd(): v.assign(v + 1) if tf.equal(v % 2, 0): v.assign(v + 1) find_next_odd() v ``` ## 例: シンプルなモデルの学習 AutoGraph はこれまで見てきたよりもずっと多くの演算を TensorFlow の内部で実行できます。たとえば、学習のためのループ処理は単に制御フローなので、実際にそれを TensorFlow に持ち込んで処理できます。 ### データのダウンロード ``` def prepare_mnist_features_and_labels(x, y): x = tf.cast(x, tf.float32) / 255.0 y = tf.cast(y, tf.int64) return x, y def mnist_dataset(): (x, y), _ = tf.keras.datasets.mnist.load_data() ds = tf.data.Dataset.from_tensor_slices((x, y)) ds = ds.map(prepare_mnist_features_and_labels) ds = ds.take(20000).shuffle(20000).batch(100) return ds train_dataset = mnist_dataset() ``` ### モデルの定義 ``` model = tf.keras.Sequential(( tf.keras.layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)), tf.keras.layers.Dense(100, activation='relu'), tf.keras.layers.Dense(100, 
activation='relu'), tf.keras.layers.Dense(10))) model.build() optimizer = tf.keras.optimizers.Adam() ``` ### 学習のためのループ処理の定義 ``` compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy() def train_one_step(model, optimizer, x, y): with tf.GradientTape() as tape: logits = model(x) loss = compute_loss(y, logits) grads = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) compute_accuracy(y, logits) return loss @tf.function def train(model, optimizer): train_ds = mnist_dataset() step = 0 loss = 0.0 accuracy = 0.0 for x, y in train_ds: step += 1 loss = train_one_step(model, optimizer, x, y) if tf.equal(step % 10, 0): tf.print('Step', step, ': loss', loss, '; accuracy', compute_accuracy.result()) return step, loss, accuracy step, loss, accuracy = train(model, optimizer) print('Final step', step, ': loss', loss, '; accuracy', compute_accuracy.result()) ``` ## バッチ処理 実際のアプリケーションにおいて、処理をバッチにまとめることはパフォーマンスの観点から重要です。AutoGraphを用いるのにもっとも適しているコードは、制御フローを _バッチ_ の単位で決定するようなコードです。もし、個々の _要素_ の単位で制御を決定する場合、パフォーマンスを保つために batch API を試してみてください。 一例として、次の Python コードががあったとします。 ``` def square_if_positive(x): return [i ** 2 if i > 0 else i for i in x] square_if_positive(range(-5, 5)) ``` TensorFlowに同等の処理を行わせる場合、次のように記述したくなるかもしれません。 (これは実際には動作します!) ``` @tf.function def square_if_positive_naive(x): result = tf.TensorArray(tf.int32, size=x.shape[0]) for i in tf.range(x.shape[0]): if x[i] > 0: result = result.write(i, x[i] ** 2) else: result = result.write(i, x[i]) return result.stack() square_if_positive_naive(tf.range(-5, 5)) ``` しかし、この場合、次のように書くこともできます。 ``` def square_if_positive_vectorized(x): return tf.where(x > 0, x ** 2, x) square_if_positive_vectorized(tf.range(-5, 5)) ```
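As a small supplementary sketch (in English, not part of the original tutorial): `tf.function` retraces a new graph for every new input shape it sees, so pinning an `input_signature` is a common way to trace one generic graph instead. The shapes below are illustrative.
```
# Supplementary sketch: one traced graph reused for variable-length int32 vectors.
import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.int32)])
def sum_positive(x):
    return tf.reduce_sum(tf.where(x > 0, x, tf.zeros_like(x)))

print(sum_positive(tf.constant([1, -2, 3])))
print(sum_positive(tf.constant([4, -5, 6, -7, 8])))  # same traced graph, different length
```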
# SageMaker Pipelines to Train a BERT-Based Text Classifier In this lab, we will do the following: * Define a set of Workflow Parameters that can be used to parametrize a Workflow Pipeline * Define a Processing step that performs cleaning and feature engineering, splitting the input data into train and test data sets * Define a Training step that trains a model on the pre-processed train data set * Define a Processing step that evaluates the trained model's performance on the test data set * Define a Register Model step that creates a model package from the estimator and model artifacts used in training * Define a Conditional step that measures a condition based on output from prior steps and conditionally executes the Register Model step * Define and create a Pipeline in a Workflow DAG, with the defined parameters and steps defined * Start a Pipeline execution and wait for execution to complete # Terminology Amazon SageMaker Pipelines support the following steps: * Pipelines - A Directed Acyclic Graph of steps and conditions to orchestrate SageMaker jobs and resource creation. * Processing Job steps - A simplified, managed experience on SageMaker to run data processing workloads, such as feature engineering, data validation, model evaluation, and model interpretation. * Training Job steps - An iterative process that teaches a model to make predictions by presenting examples from a training dataset. * Conditional step execution - Provides conditional execution of branches in a pipeline. * Registering Models - Creates a model package resource in the Model Registry that can be used to create deployable models in Amazon SageMaker. * Parametrized Pipeline executions - Allows pipeline executions to vary by supplied parameters. * Transform Job steps - A batch transform to preprocess datasets to remove noise or bias that interferes with training or inference from your dataset, get inferences from large datasets, and run inference when you don't need a persistent endpoint. # Our BERT Pipeline In the Processing Step, we perform Feature Engineering to create BERT embeddings from the `review_body` text using the pre-trained BERT model, and split the dataset into train, validation and test files. To optimize for Tensorflow training, we saved the files in TFRecord format. In the Training Step, we fine-tune the BERT model to our Customer Reviews Dataset and add a new classification layer to predict the `star_rating` for a given `review_body`. In the Evaluation Step, we take the trained model and a test dataset as input, and produce a JSON file containing classification evaluation metrics. In the Condition Step, we decide whether to register this model if the accuracy of the model, as determined by our evaluation step exceeded some value. ![](./img/bert_sagemaker_pipeline.png) The pipeline that we create follows a typical Machine Learning Application pattern of pre-processing, training, evaluation, and model registration: ![A typical ML Application pipeline](img/pipeline-full.png) # Release Resources ``` %%html <p><b>Shutting down your kernel for this notebook to release resources.</b></p> <button class="sm-command-button" data-commandlinker-command="kernelmenu:shutdown" style="display:none;">Shutdown Kernel</button> <script> try { els = document.getElementsByClassName("sm-command-button"); els[0].click(); } catch(err) { // NoOp } </script> %%javascript try { Jupyter.notebook.save_checkpoint(); Jupyter.notebook.session.delete(); } catch(err) { // NoOp } ```
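Referring back to the pipeline outline above: the cells that build the steps are not shown in this excerpt, so the following is only a rough, non-authoritative sketch of how the named pieces are typically wired together with the SageMaker Python SDK. It assumes that `processing_step`, `training_step`, `evaluation_step`, and `condition_step` were defined in earlier (not shown) cells, and the parameter names, defaults, and S3 path are illustrative.
```
# Rough sketch only: step objects are assumed to exist; names and defaults are illustrative.
import sagemaker
from sagemaker.workflow.parameters import ParameterInteger, ParameterString
from sagemaker.workflow.pipeline import Pipeline

sess = sagemaker.Session()
role = sagemaker.get_execution_role()

# Workflow parameters let a single pipeline definition be re-run with different inputs
input_data = ParameterString(name="InputDataUrl", default_value="s3://my-bucket/reviews/")  # hypothetical path
instance_count = ParameterInteger(name="ProcessingInstanceCount", default_value=1)

pipeline = Pipeline(
    name="BERT-pipeline",
    parameters=[input_data, instance_count],
    steps=[processing_step, training_step, evaluation_step, condition_step],
    sagemaker_session=sess,
)

pipeline.upsert(role_arn=role)   # create or update the pipeline definition
execution = pipeline.start()     # kick off one parametrized execution
execution.wait()
```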
``` #These dictionaries describe the local hour of the satellite local_times = {"aquaDay":"13:30", "terraDay":"10:30", "terraNight":"22:30", "aquaNight":"01:30" } # and are used to load the correct file for dealing with the date-line. min_hours = {"aquaDay":2, "terraDay":-1, "aquaNight":-1, "terraNight":11} max_hours = {"aquaDay":24, "terraDay":22, "aquaNight":13, "terraNight":24} import xarray as xr import pandas as pd import numpy as np import matplotlib.pyplot as plt #Data loader for the satellite data, #returns a complete global map (regular lat-lon) #but sparsely filled, with only one stripe of data #showing where the satellite passed that hour def get_satellite_slice(date : str, utc_hour : int, satellite='aquaDay', latitude_bound = None #Recommend only using |lat| < 70 degrees ): #Due to crossing of the datetime, some times will be saved different date if utc_hour < min_hours[satellite]: file_date = str((np.datetime64(date) - np.timedelta64(1,'D'))) elif utc_hour > max_hours[satellite]: file_date = str((np.datetime64(date) + np.timedelta64(1,'D'))) else: file_date = date #print ('the UTC hour is', utc_hour) #print ('the file date is', file_date) #Open .tif file sat_xr = xr.open_rasterio(f'{satellite_folder}/{satellite}_errorGTE03K_04km_{file_date}.tif') #Rename spatial dimensions sat_xr = sat_xr.rename({'x':'longitude','y':'latitude'}) #Create time delta to change local to UTC time_delta = pd.to_timedelta(sat_xr.longitude.data/15,unit='H') #Convert local satellite time to UTC and round to nearest hour time = (pd.to_datetime([file_date + " " + "13:30"]*time_delta.shape[0]) - time_delta).round('H') #display(time) #print(time) #Select desired hour dt = np.datetime64(f'{date} {utc_hour:02}:00:00') right_time = np.expand_dims(time == dt,axis=(0,1)) #print ('right time', right_time.sum()) if right_time.sum() == 0: print("Warning: Correct time not found in dataset, likely problem in file selection") #Make subset subset = np.logical_and(np.isfinite(sat_xr),right_time) if subset.sum() == 0: print(f"Warning: No valid data found for {date} {utc_hour:02}h") if latitude_bound is not None: #print(f"Subsetting < {latitude_bound}") subset = np.logical_and(subset,np.expand_dims(np.abs(sat_xr.latitude) < latitude_bound,axis=(0,-1))) #Select valid data test_subset = sat_xr.where(subset).load() sat_xr.close() sat_xr = None #display(test_subset) #display(test_subset[0,::-1,:]) #display(test_subset.squeeze('band')) return test_subset[0,::-1,:] satellite_folder = '/network/group/aopp/predict/TIP016_PAXTON_RPSPEEDY/ML4L/ECMWF_files/raw/' test_data = get_satellite_slice('2018-01-03',11,latitude_bound=70) plt.figure(dpi=200) #Make grid for plotting purposes xv, yv = np.meshgrid(test_data.longitude, test_data.latitude, indexing='xy') #Contour plot plt.contourf(xv,yv,test_data,levels=np.arange(230,320,10)) plt.xlim(0,100) plt.ylim(-80,-60) plt.colorbar() blob = [True,False] sum(blob) def get_era_data(date : str, utc_hour : int, field = 't2m'): print ('date = ', '_'.join(date.split('-'))) month = '_'.join(date.split('-')[:-1]) print ('month', month) ds_era = xr.open_dataset(f'{era_folder}/sfc_unstructured_{month}.grib',engine='cfgrib') #Grab correct field da = ds_era[field] # time_str = f"{date} {utc_hour:02}:00:00" print (time_str) da = da.sel(time=time_str) #Relabel longitude coordinate to be consistent with MODIS da = da.assign_coords({"longitude": (((da.longitude + 180) % 360) - 180)}) #Load data, perhaps this is too early? 
da = da.load() #Close file, attempt to not have memory leaks ds_era.close() ds_era = None return da era_folder = '/network/group/aopp/predict/TIP016_PAXTON_RPSPEEDY/ML4L/ECMWF_files/raw/' da = get_era_data('2019-01-01',10) ``` --- ``` root = '/network/group/aopp/predict/TIP016_PAXTON_RPSPEEDY/ML4L/ECMWF_files/raw/' f = 'aquaDay_errorGTE03K_04km_2019-01-01.tif' sat_xr = xr.open_rasterio(root+f) #load the temperature data array sat_xr = sat_xr.rename({'x':'longitude','y':'latitude'}) time_delta = pd.to_timedelta(sat_xr.longitude.data/15,unit='H') file_date = '2019-01-01' local_times = "13:30" time = (pd.to_datetime([file_date + " " + local_times]*time_delta.shape[0]) - time_delta).round('H') time #Select desired hour date = '2019-01-01' utc_hour = 10 dt = np.datetime64(f'{date} {utc_hour:02}:00:00') right_time = np.expand_dims(time == dt,axis=(0,1)) if right_time.sum() == 0: print("Warning: Correct time not found in dataset, likely problem in file selection") #Make subset subset = np.logical_and(np.isfinite(sat_xr),right_time) subset ```
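A supplementary sketch building on `get_satellite_slice` above: several hourly swaths can be stitched into one composite by looping over UTC hours and keeping, at each pixel, whichever pass observed it. The hour range, the use of `combine_first`, and the availability of the files for the chosen date are all assumptions.
```
# Hedged sketch: composite several aquaDay swaths for one date.
def get_daily_composite(date, hours=range(9, 17), latitude_bound=70):
    composite = None
    for hour in hours:
        slice_ = get_satellite_slice(date, hour, latitude_bound=latitude_bound)
        # keep existing pixels, fill the gaps with the new swath
        composite = slice_ if composite is None else composite.combine_first(slice_)
    return composite

daily = get_daily_composite('2018-01-03')
daily.plot(vmin=230, vmax=320)
```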
#### New to Plotly?
Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).
<br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online).
<br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!
#### Version Check
Note: Tables are available in version <b>2.1.0+</b><br>
Run `pip install plotly --upgrade` to update your Plotly version
```
import plotly
plotly.__version__
```
#### Import CSV Data
```
import pandas as pd
import re

import plotly.plotly as py
import plotly.graph_objs as go

df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/Mining-BTC-180.csv')

# remove min:sec:millisec from dates
for i, row in enumerate(df['Date']):
    p = re.compile(' 00:00:00')
    datetime = p.split(df['Date'][i])[0]
    df.iloc[i, 1] = datetime

table = go.Table(
    header=dict(
        values=list(df.columns),
        line = dict(color='rgb(50, 50, 50)'),
        align = ['left'] * 5,
        fill = dict(color='#EDFAFF')
    ),
    cells=dict(
        values=[df.iloc[j] for j in range(10)],
        line = dict(color='rgb(50, 50, 50)'),
        align = ['left'] * 5,
        fill = dict(color='#f5f5fa')
    )
)

py.iplot([table])
```
#### Table and Right Aligned Plots
In Plotly there is no native way to insert a Plotly Table into a Subplot. To do this, create your own `Layout` object and define multiple `xaxis` and `yaxis` to split up the chart area into different domains. Then, for the traces you wish to insert in your final chart, set their `xaxis` and `yaxis` individually to map to the domains defined in the `Layout`. See the example below to see how to align 3 Scatter plots to the right and a Table on the left.
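Before the full example, here is a stripped-down sketch of the same domain mechanism with just one table and one scatter trace. The domain splits and axis names below are arbitrary choices, and `df` is the dataframe loaded above.
```
import plotly.plotly as py
import plotly.graph_objs as go

# Table occupies the left half of the figure via its `domain`
mini_table = go.Table(
    domain=dict(x=[0, 0.45], y=[0, 1]),
    header=dict(values=['<b>Date</b>', '<b>Hash-rate</b>']),
    cells=dict(values=[df['Date'][:10], df['Hash-rate'][:10]])
)

# Scatter trace mapped onto axes whose domains cover the right half
mini_scatter = go.Scatter(x=df['Date'], y=df['Hash-rate'], xaxis='x1', yaxis='y1', mode='lines')

mini_layout = dict(
    xaxis1=dict(domain=[0.55, 1], anchor='y1'),
    yaxis1=dict(domain=[0, 1], anchor='x1'),
    title='Minimal table + chart layout'
)

py.iplot(dict(data=[mini_table, mini_scatter], layout=mini_layout))
```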
``` import plotly.plotly as py import plotly.graph_objs as go table_trace1 = go.Table( domain=dict(x=[0, 0.5], y=[0, 1.0]), columnwidth = [30] + [33, 35, 33], columnorder=[0, 1, 2, 3, 4], header = dict(height = 50, values = [['<b>Date</b>'],['<b>Number<br>transactions</b>'], ['<b>Output-volume(BTC)</b>'], ['<b>Market-Price</b>']], line = dict(color='rgb(50, 50, 50)'), align = ['left'] * 5, font = dict(color=['rgb(45, 45, 45)'] * 5, size=14), fill = dict(color='#d562be')), cells = dict(values = [df.iloc[j][1:5] for j in range(25)], line = dict(color='#506784'), align = ['left'] * 5, font = dict(color=['rgb(40, 40, 40)'] * 5, size=12), format = [None] + [", .2f"] * 2 + [',.4f'], prefix = [None] * 2 + ['$', u'\u20BF'], suffix=[None] * 4, height = 27, fill = dict(color=['rgb(235, 193, 238)', 'rgba(228, 222, 249, 0.65)'])) ) trace1=go.Scatter( x=df['Date'], y=df['Hash-rate'], xaxis='x1', yaxis='y1', mode='lines', line=dict(width=2, color='#9748a1'), name='hash-rate-TH/s' ) trace2=go.Scatter( x=df['Date'], y=df['Mining-revenue-USD'], xaxis='x2', yaxis='y2', mode='lines', line=dict(width=2, color='#b04553'), name='mining revenue' ) trace3=go.Scatter( x=df['Date'], y=df['Transaction-fees-BTC'], xaxis='x3', yaxis='y3', mode='lines', line=dict(width=2, color='#af7bbd'), name='transact-fee' ) axis=dict( showline=True, zeroline=False, showgrid=True, mirror=True, ticklen=4, gridcolor='#ffffff', tickfont=dict(size=10) ) layout1 = dict( width=950, height=800, autosize=False, title='Bitcoin mining stats for 180 days', margin = dict(t=100), showlegend=False, xaxis1=dict(axis, **dict(domain=[0.55, 1], anchor='y1', showticklabels=False)), xaxis2=dict(axis, **dict(domain=[0.55, 1], anchor='y2', showticklabels=False)), xaxis3=dict(axis, **dict(domain=[0.55, 1], anchor='y3')), yaxis1=dict(axis, **dict(domain=[0.66, 1.0], anchor='x1', hoverformat='.2f')), yaxis2=dict(axis, **dict(domain=[0.3 + 0.03, 0.63], anchor='x2', tickprefix='$', hoverformat='.2f')), yaxis3=dict(axis, **dict(domain=[0.0, 0.3], anchor='x3', tickprefix=u'\u20BF', hoverformat='.2f')), plot_bgcolor='rgba(228, 222, 249, 0.65)', annotations=[ dict( showarrow=False, text='The last 20 records', xref='paper', yref='paper', x=0, y=1.01, xanchor='left', yanchor='bottom', font=dict(size=15) ) ] ) fig1 = dict(data=[table_trace1, trace1, trace2, trace3], layout=layout1) py.iplot(fig1) ``` #### Vertical Table and Graph Subplot ``` import plotly.plotly as py import plotly.graph_objs as go table_trace2 = go.Table( domain=dict(x=[0, 1], y=[0, 1.0]), columnwidth = [30] + [33, 35, 33], columnorder=[0, 1, 2, 3, 4], header = dict(height = 50, values = [['<b>Date</b>'],['<b>Hash Rate, TH/sec</b>'], ['<b>Mining revenue</b>'], ['<b>Transaction fees</b>']], line = dict(color='rgb(50, 50, 50)'), align = ['left'] * 5, font = dict(color=['rgb(45, 45, 45)'] * 5, size=14), fill = dict(color='#d562be')), cells = dict(values = [df['Date'][-20:], df['Hash-rate'][-20:], df['Mining-revenue-USD'][-20:], df['Transaction-fees-BTC'][-20:]], line = dict(color='#506784'), align = ['left'] * 5, font = dict(color=['rgb(40, 40, 40)'] * 5, size=12), format = [None] + [", .2f"] * 2 + [',.4f'], prefix = [None] * 2 + ['$', u'\u20BF'], suffix=[None] * 4, height = 27, fill = dict(color=['rgb(235, 193, 238)', 'rgba(228, 222, 249, 0.65)'])) ) trace4=go.Scatter( x=df['Date'], y=df['Hash-rate'], xaxis='x1', yaxis='y1', mode='lines', line=dict(width=2, color='#9748a1'), name='hash-rate-TH/s' ) trace5=go.Scatter( x=df['Date'], y=df['Mining-revenue-USD'], xaxis='x2', yaxis='y2', mode='lines', 
line=dict(width=2, color='#b04553'), name='mining revenue' ) trace6=go.Scatter( x=df['Date'], y=df['Transaction-fees-BTC'], xaxis='x3', yaxis='y3', mode='lines', line=dict(width=2, color='#af7bbd'), name='transact-fee' ) axis=dict( showline=True, zeroline=False, showgrid=True, mirror=True, ticklen=4, gridcolor='#ffffff', tickfont=dict(size=10) ) layout2 = dict( width=950, height=800, autosize=False, title='Bitcoin mining stats for 180 days', margin = dict(t=100), showlegend=False, xaxis1=dict(axis, **dict(domain=[0, 1], anchor='y1', showticklabels=False)), xaxis2=dict(axis, **dict(domain=[0, 1], anchor='y2', showticklabels=False)), xaxis3=dict(axis, **dict(domain=[0, 1], anchor='y3')), yaxis1=dict(axis, **dict(domain=[2 * 0.21 + 0.02 + 0.02, 0.68], anchor='x1', hoverformat='.2f')), yaxis2=dict(axis, **dict(domain=[0.21 + 0.02, 2 * 0.21 + 0.02], anchor='x2', tickprefix='$', hoverformat='.2f')), yaxis3=dict(axis, **dict(domain=[0.0, 0.21], anchor='x3', tickprefix=u'\u20BF', hoverformat='.2f')), plot_bgcolor='rgba(228, 222, 249, 0.65)', annotations=[ dict( showarrow=False, text='The last 20 records', xref='paper', yref='paper', x=0.415, y=1.01, xanchor='left', yanchor='bottom', font=dict(size=15) ) ] ) fig2 = dict(data=[table_trace2, trace4, trace5, trace6], layout=layout2) py.iplot(fig2) ``` #### Reference See https://plot.ly/python/reference/#table for more information regarding chart attributes! <br> For examples of Plotly Tables, see: https://plot.ly/python/table/ ``` from IPython.display import display, HTML display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />')) display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">')) ! pip install git+https://github.com/plotly/publisher.git --upgrade import publisher publisher.publish( 'table-subplots.ipynb', 'python/table-subplots/', 'Table and Chart Subplots', 'How to create a subplot with tables and charts in Python with Plotly.', title = 'Table and Chart Subplots | plotly', has_thumbnail='true', thumbnail='table_subplots.jpg', language='python', display_as='multiple_axes', order=11) ```
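As an aside, newer Plotly releases (4.x and later) do support tables natively inside subplots via `make_subplots` specs. The following is a hedged sketch of that alternative; it uses `plotly.graph_objects` and `fig.show()` rather than the `py.iplot` workflow documented above, and again reuses the `df` loaded earlier.
```
from plotly.subplots import make_subplots
import plotly.graph_objects as go

fig = make_subplots(
    rows=1, cols=2,
    specs=[[{"type": "table"}, {"type": "xy"}]],   # one table cell, one cartesian cell
    column_widths=[0.5, 0.5],
)
fig.add_trace(
    go.Table(header=dict(values=['Date', 'Hash-rate']),
             cells=dict(values=[df['Date'][:10], df['Hash-rate'][:10]])),
    row=1, col=1,
)
fig.add_trace(go.Scatter(x=df['Date'], y=df['Hash-rate'], mode='lines', name='hash-rate'),
              row=1, col=2)
fig.update_layout(title_text="Table and chart with make_subplots")
fig.show()
```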
### Name: Anjum Rohra # Overview ![it_sector-2.jpg](attachment:it_sector-2.jpg) Being a popular finance journalist of Europe, everyone is waiting for the IT Salary Survey report you release every 3 years. The IT Sector is booming and the younger aspirants keep themselves updated with the trends by the beautiful visualizations your report contains. Given the survey data from 2018 - 2020, it’s time to put your creative hat on and lay out insightful visualizations for the masses. ### Importing the required libraries ``` import plotly.express as px import pandas as pd import dash import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Output, Input ``` ### Importing data ``` data_18 = pd.read_csv('Survey_2018.csv') data_19 = pd.read_csv('Survey_2019.csv') data_20 = pd.read_csv('Survey_2020.csv') ``` ### Imputing the missing values #### 2020 imputation ``` data_20 = data_20.rename(columns = {'Position ': 'Position'}) data_20.isnull().sum() data_20['Age'].fillna(data_20['Age'].mean(), inplace=True) data_20['Gender'].fillna('Male', inplace=True) data_20['Position'].fillna('Software Engineer', inplace=True) data_20['Position'].replace({'Account Managet': 'Account Manager', 'agile master ':'Agile Coach', 'Data analyst ':'Data Analyst', 'Data Analyst ':'Data Analyst', 'Dana Analyst':'Data Analyst', 'Fullstack engineer, ну или Software engineer': 'Fullstack engineer, Software engineer', 'Software Architekt':'Software Architect', 'DatabEngineer':'Data Engineer', 'data engineer':'Data Engineer', 'support engineer':'Support Engineer', 'Systemadministrator':'System Administrator', 'Team lead':'Team Lead', 'Tech Leader':'Tech Lead', 'Technical Lead':'Tech Lead', 'Security engineer':'Security Engineer' }, inplace=True) data_20['Your main technology / programming language'].replace({'Javascript / Typescript':'JavaScript/TypeScript','JavaScript / TypeScript':'JavaScript/TypeScript','TypeScript, JavaScript':'JavaScript/TypeScript','JavaScript/Typescript':'JavaScript/TypeScript','JavaScript / typescript':'JavaScript/TypeScript','Javascript/Typescript':'JavaScript/TypeScript','JavaScript, TypeScript':'JavaScript/TypeScript','kotlin':'Kotlin','Javascript':'JavaScript','JavaScript ':'JavaScript','Javascript ':'JavaScript','javascript':'JavaScript','Typescript':'TypeScript','typescript':'TypeScript','Typescript ':'TypeScript','python':'Python','Python ':'Python','pythin':'Python','python ':'Python','Pyrhon':'Python','scala':'Scala','Java / Scala':'Java/Scala','python, scala':'Scala / Python','C, C++':'C/C++','C++/c':'C/C++','C++, C#':'C/C++','c/c++':'C/C++','--':'Java','-':'Java'}, inplace=True) data_20['Your main technology / programming language'].fillna('Java', inplace=True) data_20['Total years of experience'].replace({'1,5':'1.5','2,5':'2.5','1 (as QA Engineer) / 11 in total':'11','15, thereof 8 as CTO':'15','6 (not as a data scientist, but as a lab scientist)':'6','383':'38.3','less than year':'0'}, inplace=True) data_20['Total years of experience'].mode() data_20['Total years of experience'].unique() data_20['Total years of experience']=data_20['Total years of experience'].astype('float64') data_20['Total years of experience'].median() data_20['Total years of experience'].fillna(data_20['Total years of experience'].median(), inplace=True) data_20['Years of experience in Germany'].unique() data_20['Years of experience in Germany'].replace({'⁰':'0','-':'10','< 1':'<1','1,7':'1.7','2,5':'2.5','0,5':'0.5','1,5':'1.5','4,5':'4.5','3,5':'3.5','4 month':'4 months','less 
than year':'<1'}, inplace=True) data_20['Years of experience in Germany'].mode() data_20['Years of experience in Germany'].fillna('2', inplace=True) data_20['Years of experience in Germany'].unique() data_18.drop('Timestamp',axis=1,inplace=True) data_19.drop('Zeitstempel',axis=1,inplace=True) data_20.drop('Timestamp',axis=1,inplace=True) data_20['Seniority level'].fillna('Senior', inplace=True) data_20['Other technologies/programming languages you use often'].fillna('Javascript / Typescript', inplace=True) data_20['Yearly bonus + stocks in EUR'].fillna('0', inplace=True) data_20['Annual bonus+stocks one year ago. Only answer if staying in same country'].fillna('0', inplace=True) data_20['Number of vacation days'].fillna('30', inplace=True) data_20['Employment status'].fillna('Full-time employee', inplace=True) data_20['Contract duration'].fillna('Unlimited contract', inplace=True) data_20['Main language at work'].fillna('English', inplace=True) data_20['Company size'].fillna('1000+', inplace=True) data_20['Company type'].fillna('Product', inplace=True) data_20['Have you lost your job due to the coronavirus outbreak?'].fillna('No', inplace=True) data_20['Have you received additional monetary support from your employer due to Work From Home? If yes, how much in 2020 in EUR'].fillna('0', inplace=True) data_20['Annual brutto salary (without bonus and stocks) one year ago. Only answer if staying in the same country'].fillna(data_20['Annual brutto salary (without bonus and stocks) one year ago. Only answer if staying in the same country'].median(), inplace=True) data_20['Have you been forced to have a shorter working week (Kurzarbeit)? If yes, how many hours per week'].fillna(data_20['Have you been forced to have a shorter working week (Kurzarbeit)? If yes, how many hours per week'].mean(), inplace=True) data_20.isnull().sum() ``` #### 2018 imputation ``` data_18.isnull().sum() data_18['Gender'].fillna('M', inplace=True) data_18['City'].fillna('Berlin', inplace=True) data_18['Position'].fillna('Java Developer', inplace=True) data_18['Your level'].fillna('Senior', inplace=True) data_18['Are you getting any Stock Options?'].fillna('No', inplace=True) data_18['Main language at work'].fillna('English', inplace=True) data_18['Company size'].fillna('100-1000', inplace=True) data_18['Company type'].fillna('Product', inplace=True) data_18['Age'].fillna(data_18['Age'].mean(), inplace=True) data_18['Years of experience'].fillna(data_18['Years of experience'].mean(), inplace=True) data_18['Current Salary'].fillna(data_18['Current Salary'].mean(), inplace=True) data_18['Salary one year ago'].fillna(data_18['Salary one year ago'].mean(), inplace=True) data_18['Salary two years ago'].fillna(data_18['Salary two years ago'].mean(), inplace=True) data_18['Gender'].value_counts() data_18['Gender'].replace({'M':'Male','F':'Female'}, inplace=True) data_18['Company type'].unique() data_18['Company type'].replace({'Consulting Company':'Consulting','Consultancy':'Consulting','Consulting Company':'Consulting','Consult':'Consulting','IT Consulting ':'IT Consulting','IT Consultancy ':'IT Consulting','IT Consultants':'IT Consulting','Outsorce':'Outsourcing','Outsource':'Outsourcing','E-Commerce firm':'E-Commerce','e-commerce':'E-Commerce','Ecommerce':'E-Commerce'}, inplace=True) data_18.isnull().sum() ``` #### 2019 imputation ``` data_19.drop('0',axis=1,inplace=True) data_19.isnull().sum() data_19['Seniority level'].fillna('Senior', inplace=True) data_19['Position (without seniority)'].fillna('Backend Developer', 
inplace=True) data_19['Your main technology / programming language'].fillna('Python', inplace=True) data_19['Main language at work'].fillna('English', inplace=True) data_19['Company name '].fillna('Zalando', inplace=True) data_19['Company size'].fillna('100-1000', inplace=True) data_19['Company type'].fillna('Product', inplace=True) data_19['Contract duration'].fillna('unlimited', inplace=True) data_19['Company business sector'].fillna('Commerce', inplace=True) data_19['Age'].fillna(data_19['Age'].mean(), inplace=True) data_19['Years of experience'].fillna(data_19['Years of experience'].mean(), inplace=True) data_19['Yearly brutto salary (without bonus and stocks)'].fillna(data_19['Yearly brutto salary (without bonus and stocks)'].mean(), inplace=True) data_19['Yearly bonus'].fillna(data_19['Yearly bonus'].mean(), inplace=True) data_19['Yearly stocks'].fillna(data_19['Yearly stocks'].mean(), inplace=True) data_19['Yearly brutto salary (without bonus and stocks) one year ago. Only answer if staying in same country'].fillna(data_19['Yearly brutto salary (without bonus and stocks) one year ago. Only answer if staying in same country'].mean(), inplace=True) data_19['Yearly bonus one year ago. Only answer if staying in same country'].fillna(data_19['Yearly bonus one year ago. Only answer if staying in same country'].mean(), inplace=True) data_19['Yearly stocks one year ago. Only answer if staying in same country'].fillna(data_19['Yearly stocks one year ago. Only answer if staying in same country'].mean(), inplace=True) data_19['Number of vacation days'].fillna(data_19['Number of vacation days'].mean(), inplace=True) data_19['Number of home office days per month'].fillna(data_19['Number of home office days per month'].mean(), inplace=True) data_19['Seniority level'].value_counts() data_19['Position (without seniority)'].unique() data_19['Your main technology / programming language'].unique() data_19['Main language at work'].unique() data_19['Company name '].unique() data_19['Company name '].replace({'google':'Google','check24':'Check24','CHECK24':'Check24','Here':'HERE'}, inplace=True) data_19['Company business sector'].unique() data_19.isnull().sum() ``` ## Data Visualization with Dash Application ``` app = dash.Dash(__name__) app.layout = html.Div([ html.Div([ dcc.Dropdown(id='years', multi=False, clearable=False, options=[{'label':x, 'value':x} for x in sorted(['2018','2019','2020'])], value="2018") ],style={'width':'50%'}), html.Div([ dcc.Dropdown(id='parameters', multi=False, clearable=False, options=[{'label':x, 'value':x} for x in sorted(['City','Gender','Seniority level','Main language at work','Current Salary'])], value="Gender") ],style={'width':'50%'}), # html.Div([ # dcc.Graph(id='my-pieplot', figure={}) # ]), html.Div([ dcc.Graph(id='my-plot', figure={}) ]) ]) # Callback - app interactivity section------------------------------------ @app.callback( #Output(component_id='my-pieplot', component_property='figure'), Output(component_id='my-plot', component_property='figure'), Input(component_id='years', component_property='value'), Input(component_id='parameters', component_property='value') ) def update_graph(year_chosen, parameter): print(year_chosen) print(parameter) if (year_chosen == '2018'): if (parameter == 'City'): city_18 = data_18['City'].value_counts().head(10) city_18 = pd.DataFrame(city_18) fig = px.bar(data_frame=city_18,y='City',color='City') fig.update_xaxes(title_text="<b>Cities</b>") fig.update_layout(title_text="<b>Respondent's City Analysis (2018)</b>") 
fig.update_yaxes(title_text="<b>Employee count</b> ", secondary_y=False) elif (parameter == 'Gender'): df = data_18 fig=px.pie(data_frame=df,names='Gender') fig.update_traces(textinfo = 'label+percent') fig.update_layout(title_text="<b>Gender ratio of the respondents in 2018</b>") elif (parameter == 'Seniority level'): fig=px.pie(data_frame=data_18,names='Your level') fig.update_traces(textinfo = 'label+percent') fig.update_layout(title_text="<b>Seniority levels in 2018</b>") elif (parameter == 'Main language at work'): language = data_18['Main language at work'].value_counts() language = pd.DataFrame(language) fig = px.bar(data_frame=language,y='Main language at work',color='Main language at work') fig.update_xaxes(title_text="<b>Languages</b>") fig.update_layout(title_text="<b>Language spoken at work (2018)</b>") fig.update_yaxes(title_text="<b>Employee count</b> ", secondary_y=False) elif (parameter == 'Current Salary'): experience = data_18.groupby('Years of experience') mean_salary_18 = experience['Current Salary'].mean() mean_salary_18=pd.DataFrame(mean_salary_18) mean_salary_18 = mean_salary_18.reset_index(drop=False) fig = px.line(data_frame=mean_salary_18,x='Years of experience',y='Current Salary') fig.update_layout(title_text="<b>Salary info for the year 2018</b>") elif (year_chosen == '2019'): if (parameter == 'City'): city_19 = data_19['City'].value_counts().head(10) city_19 = pd.DataFrame(city_19) fig = px.bar(data_frame=city_19,y='City',color='City') fig.update_xaxes(title_text="<b>Cities</b>") fig.update_layout(title_text="<b>Respondent's City Analysis (2019)</b>") fig.update_yaxes(title_text="<b>Employee count</b> ", secondary_y=False) elif (parameter == 'Gender'): df = data_19 fig=px.pie(data_frame=df,names='Gender') fig.update_traces(textinfo = 'label+percent') fig.update_layout(title_text="<b>Gender ratio of the respondents in 2019</b>") elif (parameter == 'Seniority level'): fig=px.pie(data_frame=data_19,names='Seniority level') fig.update_traces(textinfo = 'label+percent') fig.update_layout(title_text="<b>Seniority levels in 2019</b>") elif (parameter == 'Main language at work'): language = data_19['Main language at work'].value_counts() language = pd.DataFrame(language) fig = px.bar(data_frame=language,y='Main language at work',color='Main language at work') fig.update_xaxes(title_text="<b>Languages</b>") fig.update_layout(title_text="<b>Language spoken at work (2019)</b>") fig.update_yaxes(title_text="<b>Employee count</b> ", secondary_y=False) elif (parameter == 'Current Salary'): experience = data_19.groupby('Years of experience') mean_salary_19 = experience['Yearly brutto salary (without bonus and stocks)'].mean() mean_salary_19=pd.DataFrame(mean_salary_19) mean_salary_19 = mean_salary_19.reset_index(drop=False) fig = px.line(data_frame=mean_salary_19,x='Years of experience',y='Yearly brutto salary (without bonus and stocks)') fig.update_layout(title_text="<b>Salary info for the year 2019</b>") elif (year_chosen == '2020'): if (parameter == 'City'): city_20 = data_20['City'].value_counts().head(10) city_20 = pd.DataFrame(city_20) fig = px.bar(data_frame=city_20,y='City',color='City') fig.update_xaxes(title_text="<b>Cities</b>") fig.update_layout(title_text="<b>Respondent's City Analysis (2020)</b>") fig.update_yaxes(title_text="<b>Employee count</b> ", secondary_y=False) elif (parameter == 'Gender'): df = data_20 fig=px.pie(data_frame=df,names='Gender') fig.update_traces(textinfo = 'label+percent') fig.update_layout(title_text="<b>Gender ratio of the respondents 
in 2020</b>") elif (parameter == 'Seniority level'): fig=px.pie(data_frame=data_20,names='Seniority level') fig.update_traces(textinfo = 'label+percent') fig.update_layout(title_text="<b>Seniority levels in 2020</b>") elif (parameter == 'Main language at work'): language = data_20['Main language at work'].value_counts() language = pd.DataFrame(language) fig = px.bar(data_frame=language,y='Main language at work',color='Main language at work') fig.update_xaxes(title_text="<b>Languages</b>") fig.update_layout(title_text="<b>Language spoken at work (2020)</b>") fig.update_yaxes(title_text="<b>Employee count</b> ", secondary_y=False) elif (parameter == 'Current Salary'): experience = data_20.groupby('Total years of experience') mean_salary_20 = experience['Yearly brutto salary (without bonus and stocks) in EUR'].mean() mean_salary_20=pd.DataFrame(mean_salary_20) mean_salary_20 = mean_salary_20.reset_index(drop=False) fig = px.line(data_frame=mean_salary_20,x='Total years of experience',y='Yearly brutto salary (without bonus and stocks) in EUR') fig.update_layout(title_text="<b>Salary info for the year 2020</b>") return fig if __name__=='__main__': app.run_server(debug=False, port=8001) ```
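### Cross-year salary comparison (supplementary sketch) Outside the Dash app, the three cleaned frames can also be compared directly. The tidy summary frame below is an illustrative choice; the salary column names are taken from the cleaning code above.
```
# Supplementary sketch: median reported salary per survey year.
summary = pd.DataFrame({
    'Year': ['2018', '2019', '2020'],
    'Median salary (EUR)': [
        data_18['Current Salary'].median(),
        data_19['Yearly brutto salary (without bonus and stocks)'].median(),
        data_20['Yearly brutto salary (without bonus and stocks) in EUR'].median(),
    ],
})

fig = px.bar(summary, x='Year', y='Median salary (EUR)', color='Year',
             title='Median reported salary by survey year')
fig.show()
```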
github_jupyter
```
%matplotlib inline
import glob
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.contrib.learn as skflow
from sklearn import metrics

datadir = '/home/bonnin/dev/cifar-10-batches-bin/'

plt.ion()

# Each CIFAR-10 binary batch file holds 10000 records of 1 label byte + 3072 pixel bytes
G = glob.glob(datadir + '*.bin')
A = np.fromfile(G[0], dtype=np.uint8).reshape([10000, 3073])
labels = A[:, 0]
images = A[:, 1:].reshape([10000, 3, 32, 32]).transpose(0, 2, 3, 1)
plt.imshow(images[15])
print(labels[11])
images_unroll = A[:, 1:]

def dense_to_one_hot(labels_dense, num_classes=10):
    '''Convert class labels from scalars to one-hot vectors.'''
    num_labels = labels_dense.shape[0]
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot

labels_hot = dense_to_one_hot(labels, num_classes=10)

sess = tf.InteractiveSession()

# A linear baseline, kept commented out for reference:
# classifier = skflow.TensorFlowLinearClassifier(n_classes=10, batch_size=100, steps=1000, learning_rate=0.1)
# classifier.fit(images_unroll, labels)
# score = metrics.accuracy_score(labels, classifier.predict(images_unroll))
# print('Accuracy: {0:f}'.format(score))
# W = classifier.weights_
# sx, sy = (16, 32)
# f, con = plt.subplots(sx, sy, sharex='col', sharey='row')
# for xx in range(sx):
#     for yy in range(sy):
#         con[xx, yy].pcolormesh(...)

# The data must be shaped [n_samples, height, width, channels],
# so element A[0, 0, 0] corresponds to the 3 RGB bytes of the first pixel.
def max_pool_2x2(tensor_in):
    return tf.nn.max_pool(tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

def conv_model(X, y):
    X = tf.reshape(X, [-1, 32, 32, 3])
    with tf.variable_scope('conv_layer1'):
        h_conv1 = skflow.ops.conv2d(X, n_filters=16, filter_shape=[5, 5], bias=True, activation=tf.nn.relu)
        h_pool1 = max_pool_2x2(h_conv1)
    with tf.variable_scope('conv_layer2'):
        h_conv2 = skflow.ops.conv2d(h_pool1, n_filters=16, filter_shape=[5, 5], bias=True, activation=tf.nn.relu)
        h_pool2 = max_pool_2x2(h_conv2)
    # Flatten the 8x8x16 feature maps before the fully connected layers
    h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 8 * 16])
    h_fc1 = skflow.ops.dnn(h_pool2_flat, [96, 48], activation=tf.nn.relu, dropout=0.5)
    return skflow.models.logistic_regression(h_fc1, y)

images = np.array(images, dtype=np.float32)
classifier = skflow.TensorFlowEstimator(model_fn=conv_model, n_classes=10, batch_size=100, steps=2000, learning_rate=0.01)

%time classifier.fit(images, labels, logdir='/tmp/cnn_train/')
%time score = metrics.accuracy_score(labels, classifier.predict(images))
print('Accuracy: {0:f}'.format(score))

# Examining the fitted weights of the first convolutional layer
print('1st Convolutional Layer weights and Bias')
print(classifier.get_tensor_value('conv_layer1/convolution/filters:0'))
print(classifier.get_tensor_value('conv_layer1/convolution/filters:1'))
```
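As a quick sanity check on the `dense_to_one_hot` helper above, the short snippet below (illustrative only, using a made-up label array) verifies that each row is a valid one-hot vector and that the original labels can be recovered with `argmax`.
```python
# Illustrative check of dense_to_one_hot on a tiny label array
toy_labels = np.array([0, 3, 9, 3])
one_hot = dense_to_one_hot(toy_labels, num_classes=10)
print(one_hot.shape)           # (4, 10)
print(one_hot.sum(axis=1))     # every row sums to 1.0
print(one_hot.argmax(axis=1))  # [0 3 9 3] -- recovers the original labels
```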
github_jupyter
# Dask Overview Dask is a flexible library for parallel computing in Python that makes scaling out your workflow smooth and simple. On the CPU, Dask uses Pandas (NumPy) to execute operations in parallel on DataFrame (array) partitions. Dask-cuDF extends Dask where necessary to allow its DataFrame partitions to be processed by cuDF GPU DataFrames as opposed to Pandas DataFrames. For instance, when you call dask_cudf.read_csv(…), your cluster’s GPUs do the work of parsing the CSV file(s) with underlying cudf.read_csv(). Dask also supports array based workflows using CuPy. ## When to use Dask If your workflow is fast enough on a single GPU or your data comfortably fits in memory on a single GPU, you would want to use cuDF or CuPy. If you want to distribute your workflow across multiple GPUs, have more data than you can fit in memory on a single GPU, or want to analyze data spread across many files at once, you would want to use Dask. One additional benefit Dask provides is that it lets us easily spill data between device and host memory. This can be very useful when we need to do work that would otherwise cause out of memory errors. In this brief notebook, you'll walk through an example of using Dask on a single GPU. Because we're using Dask, the same code in this notebook would work on two, eight, 16, or 100s of GPUs. # Creating a Local Cluster The easiest way to scale workflows on a single node is to use the `LocalCUDACluster` API. This lets us create a GPU cluster, using one worker per GPU by default. In this case, we'll pass the following arguments. - `CUDA_VISIBLE_DEVICES`, to limit our cluster to a single GPU (for demonstration purposes). - `device_memory_limit`, to illustrate how we can spill data between GPU and CPU memory. Artificial memory limits like this reduce our performance if we don't actually need them, but can let us accomplish much larger tasks when we do. - `rmm_pool_size`, to use the RAPIDS Memory Manager to allocate one big chunk of memory upfront rather than having our operations call `cudaMalloc` all the time under the hood. This improves performance, and is generally a best practice. ``` from dask.distributed import Client, fire_and_forget, wait from dask_cuda import LocalCUDACluster from dask.utils import parse_bytes import dask cluster = LocalCUDACluster( CUDA_VISIBLE_DEVICES="0,1", device_memory_limit=parse_bytes("3GB"), rmm_pool_size=parse_bytes("16GB"), ) client = Client(cluster) client ``` Click the **Dashboard** link above to view your Dask dashboard. ## cuDF DataFrames to Dask DataFrames Dask lets scale our cuDF workflows. We'll walk through a couple of examples below, and then also highlight how Dask lets us spill data from GPU to CPU memory. First, we'll create a dataframe with CPU Dask and then send it to the GPU ``` import cudf import dask_cudf ddf = dask_cudf.from_dask_dataframe(dask.datasets.timeseries()) ddf.head() ``` ### Example One: Groupby-Aggregations ``` ddf.groupby(["id", "name"]).agg({"x":['sum', 'mean']}).head() ``` Run the code above again. If you look at the task stream in the dashboard, you'll notice that we're creating the data every time. That's because Dask is lazy. We need to `persist` the data if we want to cache it in memory. ``` ddf = ddf.persist() wait(ddf); ddf.groupby(["id", "name"]).agg({"x":['sum', 'mean']}).head() ``` This is the same API as cuDF, except it works across many GPUs. ### Example Two: Rolling Windows We can also do things like rolling window calculations with Dask and GPUs. 
``` ddf.head() rolling = ddf[['x','y']].rolling(window=3) type(rolling) rolling.mean().head() ``` ## Larger than GPU Memory Workflows What if we needed to scale up even more, but didn't have enough GPU memory? Dask handles spilling for us, so we don't need to worry about it. The `device_memory_limit` parameter we used while creating the LocalCluster determines when we should start spilling. In this case, we'll start spilling when we've used about 4GB of GPU memory. Let's create a larger dataframe to use as an example. ``` ddf = dask_cudf.from_dask_dataframe(dask.datasets.timeseries(start="2000-01-01", end="2003-12-31", partition_freq='60d')) ddf = ddf.persist() len(ddf) print(f"{ddf.memory_usage(deep=True).sum().compute() / 1e9} GB of data") ddf.head() ``` Let's imagine we have some downstream operations that require all the data from a given unique identifier in the same partition. We can repartition our data based on the `name` column using the `shuffle` API. Repartitioning our large dataframe will spike GPU memory higher than 4GB, so we'll need to spill to CPU memory. ``` ddf = ddf.shuffle(on="id") ddf = ddf.persist() len(ddf) ``` Watch the Dask Dashboard while this runs. You should see a lot of tasks in the stream like `disk-read` and `disk-write`. Setting a `device_memory_limit` tells dask to spill to CPU memory and potentially disk (if we overwhelm CPU memory). This lets us do these large computations even when we're almost out of memory (though in this case, we faked it). # Dask Custom Functions Dask DataFrames also provide a `map_partitions` API, which is very useful for parallelizing custom logic that doesn't quite fit perfectly or doesn't need to be used with the Dask dataframe API. Dask will `map` the function to every partition of the distributed dataframe. Now that we have all the rows of each `id` collected in the same partitions, what if we just wanted to sort **within each partition**. Avoiding global sorts is usually a good idea if possible, since they're very expensive operations. ``` sorted_ddf = ddf.map_partitions(lambda x: x.sort_values("id")) len(sorted_ddf) ``` We could also do something more complicated and wrap it into a function. Let's do a rolling window on the two value columns after sorting by the id column. ``` def sort_and_rolling_mean(df): df = df.sort_values("id") df = df.rolling(3)[["x", "y"]].mean() return df result = ddf.map_partitions(sort_and_rolling_mean) result = result.persist() wait(result); # let's look at a random partition result.partitions[12].head() ``` Pretty cool. When we're using `map_partitions`, the function is executing on the individual cuDF DataFrames that make up our Dask DataFrame. This means we can do any cuDF operation, run CuPy array manipulations, or anything else we want. # Dask Delayed Dask also provides a `delayed` API, which is useful for parallelizing custom logic that doesn't quite fit into the DataFrame API. Let's imagine we wanted to run thousands of regressions models on different combinations of two features. We can do this experiment super easily with dask.delayed. 
``` from cuml.linear_model import LinearRegression from dask import delayed import dask import numpy as np from itertools import combinations # Setup data np.random.seed(12) nrows = 1000000 ncols = 50 df = cudf.DataFrame({f"x{i}": np.random.randn(nrows) for i in range(ncols)}) df['y'] = np.random.randn(nrows) feature_combinations = list(combinations(df.columns.drop("y"), 2)) feature_combinations[:10] len(feature_combinations) # Many calls to linear regression, parallelized with Dask @delayed def fit_ols(df, feature_cols, target_col="y"): clf = LinearRegression() clf.fit(df[list(feature_cols)], df[target_col]) return feature_cols, clf.coef_, clf.intercept_ # scatter the data to the workers beforehand data_future = client.scatter(df, broadcast=True) results = [] for features in feature_combinations: # note how i'm passing the scattered data future res = fit_ols(data_future, features) results.append(res) res = dask.compute(results) res = res[0] print("Features\t\tCoefficients\t\t\tIntercept") for i in range(5): print(res[i][0], res[i][1].values, res[i][2], sep="\t") ``` # Handling Parquet Files Dask and cuDF provide accelerated Parquet readers and writers, and it's useful to take advantage of these tools. To start, let's write out our DataFrame `ddf` to Parquet files using the `to_parquet` API and delete it from memory. ``` print(ddf.npartitions) ddf.to_parquet("ddf.parquet") del ddf ``` Let's take a look at what happened. ``` !ls ddf.parquet | head ``` We end up with many parquet files, and one metadata file. Dask will write one file per partition. Let's read the data back in with `dask_cudf.read_parquet`. ``` ddf = dask_cudf.read_parquet("ddf.parquet/") ddf ``` Why do we have more partitions than files? It turns out, Dask's readers do things like chunk our data by default. Additionally, the `_metadata` file helps provide guidelines for reading the data. But, we can still read them on a per-file basis if want by using a `*` wildcard in the filepath and ignoring the metadata. ``` ddf = dask_cudf.read_parquet("ddf.parquet/*.parquet") ddf ``` Let's now write one big parquet file and then read it back in. We can `repartition` our dataset down to a single partition. ``` ddf.repartition(npartitions=1).to_parquet("big_ddf.parquet") dask_cudf.read_parquet("big_ddf.parquet/") ``` We still get lots of partitions? We can control the splitting behavior using the `split_row_groups` parameter. ``` dask_cudf.read_parquet("big_ddf.parquet/", split_row_groups=False) ``` In general, we want to avoid massive partitions. The sweet spot is probably around 2-3 GB of data per partition for a 32GB V100. # Understanding Persist and Compute Before we close, it's worth coming back to the concepts of `persist` and `compute`. We've seen them several times, but haven't gone into depth. Most Dask operations are lazy. This is a common pattern in distributed computing, but is likely unfamiliar to those who primarily use single-machine libraries like pandas and cuDF. As a result, you'll usually need to call an **eager** operation like `len` or `persist` to actually trigger work. In general, you should avoid calling `compute` except when collecting small datasets or scalars. When we spin up a cluster, we're interacting with our cluster in what we call the `Client` Python process. When we created a `Client` object above, this is what we did. Calling `compute` brings all of the results back to a single GPU cuDF DataFrame in the client process, not in any of the worker processes. 
This means we're not using the same memory pool, so we could go out of memory if we're not careful. For those of you with Spark experience, you can think of `persist` as triggering work and caching the dataframe in distributed memory and `compute` as collecting the data or results into a single GPU dataframe (cuDF) on the driver. ### Should I Persist My Data? Persisting is generally a good idea if the data needs to be accessed multiple times, to avoid repeated computation. However, if the size of your data would lead to memory pressure, this could cause spilling, which hurts performance. As a best practice, we recommend persisting only when necessary or when you're using an eager operation in the middle of your workflow (to avoid repeating computation). Note that calling `df.head` is an eager operation, which will trigger some computation. If you're going to be doing exploratory data analysis or visually inspecting the data, you would want to persist beforehand. # Summary RAPIDS lets us scale up and take advantage of GPU acceleration. Dask lets us scale out to multiple machines. Dask supports both cuDF DataFrames and CuPy arrays, with generally the same APIs as the single-machine libraries. We encourage you to read the Dask [documentation](https://docs.dask.org/en/latest/) to learn more, and also look at our [10 Minute Guide to cuDF and Dask cuDF](https://docs.rapids.ai/api/cudf/nightly/10min.html)
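To make the persist/compute distinction above concrete, here is a minimal sketch, assuming the cluster, `client`, and imports created earlier in this notebook are still available; `ddf_demo` and `small_means` are just illustrative names.
```python
# Minimal persist-vs-compute sketch (assumes the LocalCUDACluster set up above)
ddf_demo = dask_cudf.from_dask_dataframe(dask.datasets.timeseries())

# persist: triggers the work and caches partitions in distributed (GPU) memory;
# ddf_demo is still a lazy Dask collection afterwards, just backed by finished tasks
ddf_demo = ddf_demo.persist()
wait(ddf_demo)

# compute: collects the result into a single cuDF object in the client process,
# so only use it for small outputs such as this per-name aggregate
small_means = ddf_demo.groupby("name").agg({"x": "mean"}).compute()
print(type(small_means))
```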
github_jupyter
<table class="ee-notebook-buttons" align="left"> <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/FeatureCollection/distance.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/FeatureCollection/distance.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/FeatureCollection/distance.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> ## Install Earth Engine API and geemap Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://geemap.org). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet. ``` # Installs geemap package import subprocess try: import geemap except ImportError: print('Installing geemap ...') subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap']) import ee import geemap ``` ## Create an interactive map The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function. ``` Map = geemap.Map(center=[40,-100], zoom=4) Map ``` ## Add Earth Engine Python script ``` # Add Earth Engine dataset # Collection.distance example. # Computes the distance to the nearest feature in a collection. # Construct a FeatureCollection from a list of geometries. fc = ee.FeatureCollection([ ee.Geometry.Point(-72.94411, 41.32902), ee.Geometry.Point(-72.94411, 41.33402), ee.Geometry.Point(-72.94411, 41.33902), # The geometries do not need to be the same type. ee.Geometry.LineString( -72.93411, 41.30902, -72.93411, 41.31902, -72.94411, 41.31902) ]) # Compute distance from the dfeatures, to a max of 1000 meters. distance = fc.distance(1000, 100) Map.setCenter(-72.94, 41.32, 13) Map.addLayer(distance, {'min': 0, 'max': 1000, 'palette': ['yellow', 'red']}, 'distance') Map.addLayer(fc, {}, 'Features') ``` ## Display Earth Engine data layers ``` Map.addLayerControl() # This line is not needed for ipyleaflet-based Map. Map ```
github_jupyter
# Hypothesis: Are digitised practices causing more failures? ## Hypothesis We believe that practices undergoing Lloyd Gerge digitisation have an increased failure rate. We will know this to be true when we look at their data for the last three months, and see that either their failures have increased, or that in general their failures are higher than average. ## Context From the months of May-Aug 2021, we see a steady increase of TPP-->EMIS Large message general failures. A general hypothesis is that this is due to record sizes increasing, which could be due to Lloyd George digitisation. This has prompted a more general hypothesis to identify whether digitisation is impacting failure rates. ## Scope - Generate a transfer outcomes table for each of the below CCGs split down for May, June, July: - Sunderland - Fylde and Wyre - Chorley and South Ribble - Blackpool - Birmingham and Solihull - Show technical failure rate for each month, for each practice in the CCG - Separate out outcomes for transfers in, and transfers out - Do this for practices as a sender and as a requester ``` import pandas as pd import numpy as np import paths from data.practice_metadata import read_asid_metadata asid_lookup=read_asid_metadata("prm-gp2gp-ods-metadata-preprod", "v2/2021/8/organisationMetadata.json") transfer_file_location = "s3://prm-gp2gp-transfer-data-preprod/v4/" transfer_files = [ "2021/5/transfers.parquet", "2021/6/transfers.parquet", "2021/7/transfers.parquet" ] transfer_input_files = [transfer_file_location + f for f in transfer_files] transfers_raw = pd.concat(( pd.read_parquet(f) for f in transfer_input_files )) transfers = transfers_raw\ .join(asid_lookup.add_prefix("requesting_"), on="requesting_practice_asid", how="left")\ .join(asid_lookup.add_prefix("sending_"), on="sending_practice_asid", how="left")\ transfers['month']=transfers['date_requested'].dt.to_period('M') def generate_monthly_outcome_breakdown(transfers, columns): total_transfers = ( transfers .groupby(columns) .size() .to_frame("Total Transfers") ) transfer_outcomes=pd.pivot_table( transfers, index=columns, columns=["status"], aggfunc='size' ) transfer_outcomes_pc = ( transfer_outcomes .div(total_transfers["Total Transfers"],axis=0) .multiply(100) .round(2) .add_suffix(" %") ) failed_transfers = ( transfers .assign(failed_transfer=transfers["status"] != "INTEGRATED_ON_TIME") .groupby(columns) .agg({'failed_transfer': 'sum'}) .rename(columns={'failed_transfer': 'ALL_FAILURE'}) ) failed_transfers_pc = ( failed_transfers .div(total_transfers["Total Transfers"],axis=0) .multiply(100) .round(2) .add_suffix(" %") ) return pd.concat([ total_transfers, transfer_outcomes, failed_transfers, transfer_outcomes_pc, failed_transfers_pc, ],axis=1).fillna(0) ``` ## Generate national transfer outcomes ``` national_metrics_monthly=generate_monthly_outcome_breakdown(transfers, ["month"]) national_metrics_monthly ``` ## Generate digitised CCG transfer outcomes ``` ccgs_to_investigate = [ "NHS SUNDERLAND CCG", 'NHS FYLDE AND WYRE CCG', 'NHS CHORLEY AND SOUTH RIBBLE CCG', 'NHS BLACKPOOL CCG', 'NHS BIRMINGHAM AND SOLIHULL CCG' ] is_requesting_ccg_of_interest = transfers.requesting_ccg_name.isin(ccgs_to_investigate) is_sending_ccg_of_interest = transfers.sending_ccg_name.isin(ccgs_to_investigate) requesting_transfers_of_interest = transfers[is_requesting_ccg_of_interest] sending_transfers_of_interest = transfers[is_sending_ccg_of_interest] ``` ### Requesting CCGs (Digitised) ``` requesting_ccgs_monthly=generate_monthly_outcome_breakdown( 
transfers=requesting_transfers_of_interest, columns=["requesting_ccg_name", "month"] ) requesting_ccgs_monthly ``` ### Sending CCGs (Digitised) ``` sending_ccgs_monthly=generate_monthly_outcome_breakdown( transfers=sending_transfers_of_interest, columns=["sending_ccg_name", "month"] ) sending_ccgs_monthly ``` ### Requesting practices (digitised) ``` requesting_practices_monthly=generate_monthly_outcome_breakdown( transfers=requesting_transfers_of_interest, columns=["requesting_ccg_name", "requesting_practice_name", "requesting_practice_ods_code", "requesting_supplier", "month"] ) requesting_practices_monthly ``` ### Sending practices (digitised) ``` sending_practices_monthly=generate_monthly_outcome_breakdown( transfers=sending_transfers_of_interest, columns=["sending_ccg_name", "sending_practice_name", "sending_practice_ods_code", "sending_supplier", "month"] ) sending_practices_monthly ``` ## Looking at failure rate trends by CCG when requesting a record ``` barplot_config = { 'color': ['lightsteelblue', 'cornflowerblue', 'royalblue'], 'edgecolor':'black', 'kind':'bar', 'figsize': (15,6), 'rot': 30 } def requesting_ccg_barplot(column_name, title): ( pd .concat({'All CCGs': national_metrics_monthly}, names=['requesting_ccg_name']) .append(requesting_ccgs_monthly) .unstack() .plot( y=column_name, title=title, **barplot_config ) ) requesting_ccg_barplot('ALL_FAILURE %', 'Total Failure Percentage (Digitised CCGs - Requesting)') requesting_ccg_barplot('TECHNICAL_FAILURE %', 'Technical Failure Percentage (Digitised CCGs - Requesting)') requesting_ccg_barplot('PROCESS_FAILURE %', 'Process Failure Percentage (Digitised CCGs - Requesting)') requesting_ccg_barplot('UNCLASSIFIED_FAILURE %', 'Unlassified Failure Percentage (Digitised CCGs - Requesting)') ``` ## Looking at failure rate trends by CCG when sending a record ``` def sending_ccg_barplot(column_name, title): ( pd .concat({'All CCGs': national_metrics_monthly}, names=['sending_ccg_name']) .append(sending_ccgs_monthly) .unstack() .plot( y=column_name, title=title, **barplot_config ) ) sending_ccg_barplot('ALL_FAILURE %', 'Total Failure Percentage (Digitised CCGs - Sending)') sending_ccg_barplot('TECHNICAL_FAILURE %', 'Technical Failure Percentage (Digitised CCGs - Sending)') sending_ccg_barplot('PROCESS_FAILURE %', 'Process Failure Percentage (Digitised CCGs - Sending)') sending_ccg_barplot('UNCLASSIFIED_FAILURE %', 'Unlassified Failure Percentage (Digitised CCGs - Sending)') ``` ## Write CCG transfer outcomes by sending and requesting practice to Excel ``` with pd.ExcelWriter('PRMT-2332-Digitisation-Failure-Rates-May-July-2021.xlsx') as writer: national_metrics_monthly.to_excel(writer, sheet_name="National Baseline") requesting_ccgs_monthly.to_excel(writer, sheet_name="Digitised CCGs (Req)") sending_ccgs_monthly.to_excel(writer, sheet_name="Digitised CCGs (Send)") requesting_practices_monthly.to_excel(writer, sheet_name="Digitised Practices (Req)") sending_practices_monthly.to_excel(writer, sheet_name="Digitised Practices (Send)") ```
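Because the transfer data above lives in a private S3 bucket, it can be useful to exercise `generate_monthly_outcome_breakdown` on a small synthetic frame first. The sketch below uses made-up statuses and months purely for illustration.
```python
# Illustrative check of generate_monthly_outcome_breakdown on synthetic data
toy_transfers = pd.DataFrame({
    "month": ["2021-05", "2021-05", "2021-06", "2021-06", "2021-06"],
    "status": [
        "INTEGRATED_ON_TIME", "TECHNICAL_FAILURE",
        "INTEGRATED_ON_TIME", "INTEGRATED_ON_TIME", "PROCESS_FAILURE",
    ],
})
# Expect Total Transfers of 2 and 3, one failure per month, plus the matching % columns
generate_monthly_outcome_breakdown(toy_transfers, ["month"])
```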
github_jupyter
# Data Collection Using Web Scraping ## To solve this problem we will need the following data : ● List of neighborhoods in Pune. ● Latitude and Longitudinal coordinates of those neighborhoods. ● Venue data for each neighborhood. ## Sources ● For the list of neighborhoods, I used (https://en.wikipedia.org/wiki/Category:Neighbourhoods_in_Pune) ● For Latitude and Longitudinal coordinates: Python Geocoder Package (https://geocoder.readthedocs.io/) ● For Venue data: Foursquare API (https://foursquare.com/) ## Methods to extract data from Sources To extract the data we will use python packages like requests, beautifulsoup and geocoder. We will use Requests and beautifulsoup packages for web scraping(https://en.wikipedia.org/wiki/Category:Neighbourhoods_in_Pune ) to get the list of neighborhoods in Pune and geocoder package to get the latitude and longitude coordinates of each neighborhood. Then we will use Folium to plot these neighborhoods on the map. After that, we will use the foursquare API to get the venue data of those neighborhoods. Foursquare API will provide many categories of the venue data but we are particularly interested in the supermarket category in order to help us to solve the business problem. ## Imports ``` import numpy as np # library to handle data in a vectorized manner import pandas as pd # library for data analsysis pd.set_option("display.max_columns", None) pd.set_option("display.max_rows", None) import json # library to handle JSON files from geopy.geocoders import Nominatim # convert an address into latitude and longitude values !pip install geocoder import geocoder # to get coordinates !pip install requests import requests # library to handle requests from bs4 import BeautifulSoup # library to parse HTML and XML documents from pandas.io.json import json_normalize # tranform JSON file into a pandas dataframe print("Libraries imported.") ``` ## Collecting the nebourhood data using Requests, BeautifulSoup, and Geocoder labries ``` data = requests.get("https://en.wikipedia.org/wiki/Category:Neighbourhoods_in_Pune").text # parse data from the html into a beautifulsoup object soup = BeautifulSoup(data, 'html.parser') # create a list to store neighborhood data neighborhood_List = [] # append the data into the list for row in soup.find_all("div", class_="mw-category")[0].findAll("li"): neighborhood_List.append(row.text) # create a new DataFrame from the list Pune_df = pd.DataFrame({"Neighborhood": neighborhood_List}) Pune_df.tail() # define a function to get coordinates def get_cord(neighborhood): coords = None # loop until you get the coordinates while(coords is None): g = geocoder.arcgis('{}, Pune, Maharashtra'.format(neighborhood)) coords = g.latlng return coords # create a list and store the coordinates coords = [ get_cord(neighborhood) for neighborhood in Pune_df["Neighborhood"].tolist() ] coords[:10] df_coords = pd.DataFrame(coords, columns=['Latitude', 'Longitude']) # merge the coordinates into the original dataframe Pune_df['Latitude'] = df_coords['Latitude'] Pune_df['Longitude'] = df_coords['Longitude'] # check the neighborhoods and the coordinates print(Pune_df.shape) Pune_df.head(10) # save the DataFrame as CSV file Pune_df.to_csv("Pune_df.csv", index=False) ``` ## Collecting the nebourhood venue data using Foursquare API ``` # define Foursquare Credentials and Version CLIENT_ID = '5HUDVH14DMECWUAFI2MICONBTTDPW1CCL1C4TFGE3FEHEUHJ' # your Foursquare ID CLIENT_SECRET = 'R0WIH5UIW2SADKBUW4B4WMY2QWBBT0Q02IURAXQXVJZMTDIV' # your Foursquare Secret VERSION = '20180605' # 
Foursquare API version print('Your credentails:') print('CLIENT_ID: ' + CLIENT_ID) print('CLIENT_SECRET:' + CLIENT_SECRET) radius = 3000 LIMIT = 150 venues = [] for lat, long, neighborhood in zip(Pune_df['Latitude'], Pune_df['Longitude'], Pune_df['Neighborhood']): # create the API request URL url = "https://api.foursquare.com/v2/venues/explore?client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}".format( CLIENT_ID, CLIENT_SECRET, VERSION, lat, long, radius, LIMIT) # make the GET request results = requests.get(url).json()["response"]['groups'][0]['items'] # return only relevant information for each nearby venue for venue in results: venues.append(( neighborhood, lat, long, venue['venue']['name'], venue['venue']['location']['lat'], venue['venue']['location']['lng'], venue['venue']['categories'][0]['name'])) # convert the venues list into a new DataFrame venues_df = pd.DataFrame(venues) # define the column names venues_df.columns = ['Neighborhood', 'Latitude', 'Longitude', 'VenueName', 'VenueLatitude', 'VenueLongitude', 'VenueCategory'] print(venues_df.shape) venues_df.head() print('There are {} uniques categories.'.format(len(venues_df['VenueCategory'].unique()))) # print out the list of categories venues_df['VenueCategory'].unique() venues_df.to_csv("venues_df.csv") ```
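One robustness note on the venue loop above: `requests.get(url).json()["response"]['groups'][0]['items']` raises a `KeyError` or `IndexError` whenever Foursquare returns an error payload (for example when the rate limit is hit). A hedged sketch of a more defensive fetch, using a hypothetical `get_nearby_venues` wrapper, might look like this:
```python
import time
import requests

def get_nearby_venues(url, retries=3, pause=1.0):
    """Hypothetical defensive wrapper around the Foursquare 'explore' call."""
    for attempt in range(retries):
        payload = requests.get(url).json()
        # Foursquare only nests results under response -> groups on success
        groups = payload.get("response", {}).get("groups", [])
        if groups:
            return groups[0].get("items", [])
        time.sleep(pause)  # back off briefly before retrying
    return []
```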
github_jupyter
# Tabulate results ``` import os import sys from typing import Tuple import pandas as pd from tabulate import tabulate from tqdm import tqdm sys.path.append('../src') from read_log_file import read_log_file LOG_HOME_DIR = os.path.join('../logs_v1/') assert os.path.isdir(LOG_HOME_DIR) MODEL_NAMES = ['logistic_regression', 'transformer_encoder', 'bert-base-uncased', 'bert-base-multilingual-cased'] SETUPS = ['zero', 'few50', 'few100', 'few150', 'few200', 'full', 'trg'] def get_best_score_from_dict(di: dict) -> dict: """Get max value from a dict""" keys_with_max_val = [] # find max value max_val = -float('inf') for k, v in di.items(): if v > max_val: max_val = v # find all keys with max value for k, v in di.items(): if v == max_val: keys_with_max_val.append(k) return { 'k': keys_with_max_val, 'v': max_val, } def create_best_results_df(langs: str) -> Tuple[pd.DataFrame, pd.DataFrame]: results_dict = {} for model_name in MODEL_NAMES: results_dict[model_name] = {} log_dir = os.path.join(LOG_HOME_DIR, langs, model_name) log_filenames = os.listdir(log_dir) for fname in log_filenames: results_dict[model_name][fname] = read_log_file( log_file_path=os.path.join(log_dir, fname), plot=False, verbose=False, )['best_val_metrics']['f1'] best_results_dict = {'Setup': SETUPS} best_hparams_dict = {'Setup': SETUPS} best_results_dict.update({model_name: [] for model_name in MODEL_NAMES}) best_hparams_dict.update({model_name: [] for model_name in MODEL_NAMES}) for model_name in MODEL_NAMES: for setup in SETUPS: best_score = get_best_score_from_dict( {k: v for k, v in results_dict[model_name].items() if k.startswith(f'{setup}_')} ) best_results_dict[model_name].append( best_score['v'] ) best_hparams_dict[model_name].append( best_score['k'] ) best_results_df = pd.DataFrame(best_results_dict) best_hparams_df = pd.DataFrame(best_hparams_dict) return best_results_df, best_hparams_df def highlight_best_score(df: pd.DataFrame) -> pd.DataFrame: """Highlight best score in each row""" return df.style.apply(lambda x: ['background: red' if isinstance(v, float) and v == max(x.iloc[1:]) else '' for v in x], axis=1) def tabulate_markdown(df: pd.DataFrame) -> str: """Tabulate in markdown format and bold best scores in each row""" df = df.round(4) for model_name in MODEL_NAMES: df[model_name] = df[model_name].astype(str) for idx in range(len(df)): max_val = max(float(df.iloc[idx][model_name]) for model_name in MODEL_NAMES) for model_name in MODEL_NAMES: cell_val = float(df.iloc[idx][model_name]) if cell_val == max_val: df.at[idx, model_name] = f'**{cell_val}**' else: df.at[idx, model_name] = f'{cell_val}' return tabulate(df, headers='keys', showindex=False, tablefmt='github') best_results_dfs_dict = {} best_hparams_dfs_dict = {} for langs in tqdm(['enbg', 'enar', 'bgen', 'bgar', 'aren', 'arbg']): best_results_dfs_dict[langs], best_hparams_dfs_dict[langs] = create_best_results_df(langs) ``` ## en-bg ``` highlight_best_score(best_results_dfs_dict['enbg']) print(tabulate_markdown(best_results_dfs_dict['enbg'])) best_hparams_dfs_dict['enbg'] ``` ## en-ar ``` highlight_best_score(best_results_dfs_dict['enar']) print(tabulate_markdown(best_results_dfs_dict['enar'])) best_hparams_dfs_dict['enar'] ``` ## bg-en ``` highlight_best_score(best_results_dfs_dict['bgen']) print(tabulate_markdown(best_results_dfs_dict['bgen'])) best_hparams_dfs_dict['bgen'] ``` ## bg-ar ``` highlight_best_score(best_results_dfs_dict['bgar']) print(tabulate_markdown(best_results_dfs_dict['bgar'])) best_hparams_dfs_dict['bgar'] ``` ## ar-en ``` 
highlight_best_score(best_results_dfs_dict['aren']) print(tabulate_markdown(best_results_dfs_dict['aren'])) best_hparams_dfs_dict['aren'] ``` ## ar-bg ``` highlight_best_score(best_results_dfs_dict['arbg']) print(tabulate_markdown(best_results_dfs_dict['arbg'])) best_hparams_dfs_dict['arbg'] ```
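As a small illustration of the helper used throughout this notebook, `get_best_score_from_dict` returns every key that attains the maximum value, which is why the cells of `best_hparams_df` hold lists:
```python
# Illustrative example with made-up scores: ties are all reported
toy_scores = {
    "zero_lr1e-5.log": 0.71,
    "zero_lr3e-5.log": 0.74,
    "zero_lr5e-5.log": 0.74,
}
print(get_best_score_from_dict(toy_scores))
# {'k': ['zero_lr3e-5.log', 'zero_lr5e-5.log'], 'v': 0.74}
```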
github_jupyter
<a href="https://colab.research.google.com/github/PacktPublishing/Hands-On-Computer-Vision-with-PyTorch/blob/master/Chapter15/Handwriting_transcription.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` !wget https://www.dropbox.com/s/l2ul3upj7dkv4ou/synthetic-data.zip !unzip -qq synthetic-data.zip !pip install torch_snippets torch_summary editdistance from torch_snippets import * from torchsummary import summary import editdistance device = 'cuda' if torch.cuda.is_available() else 'cpu' fname2label = lambda fname: stem(fname).split('@')[0] images = Glob('synthetic-data') vocab = 'QWERTYUIOPASDFGHJKLZXCVBNMqwertyuiopasdfghjklzxcvbnm' B,T,V = 64, 32, len(vocab) H,W = 32, 128 class OCRDataset(Dataset): def __init__(self, items, vocab=vocab, preprocess_shape=(H,W), timesteps=T): super().__init__() self.items = items self.charList = {ix+1:ch for ix,ch in enumerate(vocab)} self.charList.update({0: '`'}) self.invCharList = {v:k for k,v in self.charList.items()} self.ts = timesteps def __len__(self): return len(self.items) def sample(self): return self[randint(len(self))] def __getitem__(self, ix): item = self.items[ix] image = cv2.imread(item, 0) label = fname2label(item) return image, label def collate_fn(self, batch): images, labels, label_lengths, label_vectors, input_lengths = [], [], [], [], [] for image, label in batch: images.append(torch.Tensor(self.preprocess(image))[None,None]) label_lengths.append(len(label)) labels.append(label) label_vectors.append(self.str2vec(label)) input_lengths.append(self.ts) images = torch.cat(images).float().to(device) label_lengths = torch.Tensor(label_lengths).long().to(device) label_vectors = torch.Tensor(label_vectors).long().to(device) input_lengths = torch.Tensor(input_lengths).long().to(device) return images, label_vectors, label_lengths, input_lengths, labels def str2vec(self, string, pad=True): string = ''.join([s for s in string if s in self.invCharList]) val = list(map(lambda x: self.invCharList[x], string)) if pad: while len(val) < self.ts: val.append(0) return val def preprocess(self, img, shape=(32,128)): target = np.ones(shape)*255 try: H, W = shape h, w = img.shape fx = H/h fy = W/w f = min(fx, fy) _h = int(h*f) _w = int(w*f) _img = cv2.resize(img, (_w,_h)) target[:_h,:_w] = _img except: ... 
return (255-target)/255 def decoder_chars(self, pred): decoded = "" last = "" pred = pred.cpu().detach().numpy() for i in range(len(pred)): k = np.argmax(pred[i]) if k > 0 and self.charList[k] != last: last = self.charList[k] decoded = decoded + last elif k > 0 and self.charList[k] == last: continue else: last = "" return decoded.replace(" "," ") def wer(self, preds, labels): c = 0 for p, l in zip(preds, labels): c += p.lower().strip() != l.lower().strip() return round(c/len(preds), 4) def cer(self, preds, labels): c, d = [], [] for p, l in zip(preds, labels): c.append(editdistance.eval(p, l) / len(l)) return round(np.mean(c), 4) def evaluate(self, model, ims, labels, lower=False): model.eval() preds = model(ims).permute(1,0,2) # B, T, V+1 preds = [self.decoder_chars(pred) for pred in preds] return {'char-error-rate': self.cer(preds, labels), 'word-error-rate': self.wer(preds, labels), 'char-accuracy' : 1 - self.cer(preds, labels), 'word-accuracy' : 1 - self.wer(preds, labels)} from sklearn.model_selection import train_test_split trn_items, val_items = train_test_split(Glob('synthetic-data'), test_size=0.2, random_state=22) trn_ds = OCRDataset(trn_items) val_ds = OCRDataset(val_items) trn_dl = DataLoader(trn_ds, batch_size=B, collate_fn=trn_ds.collate_fn, drop_last=True, shuffle=True) val_dl = DataLoader(val_ds, batch_size=B, collate_fn=val_ds.collate_fn, drop_last=True) from torch_snippets import Reshape, Permute class BasicBlock(nn.Module): def __init__(self, ni, no, ks=3, st=1, padding=1, pool=2, drop=0.2): super().__init__() self.ks = ks self.block = nn.Sequential( nn.Conv2d(ni, no, kernel_size=ks, stride=st, padding=padding), nn.BatchNorm2d(no, momentum=0.3), nn.ReLU(inplace=True), nn.MaxPool2d(pool), nn.Dropout2d(drop) ) def forward(self, x): return self.block(x) class Ocr(nn.Module): def __init__(self, vocab): super().__init__() self.model = nn.Sequential( BasicBlock( 1, 128), BasicBlock(128, 128), BasicBlock(128, 256, pool=(4,2)), Reshape(-1, 256, 32), Permute(2, 0, 1) # T, B, D ) self.rnn = nn.Sequential( nn.LSTM(256, 256, num_layers=2, dropout=0.2, bidirectional=True), ) self.classification = nn.Sequential( nn.Linear(512, vocab+1), nn.LogSoftmax(-1), ) def forward(self, x): x = self.model(x) x, lstm_states = self.rnn(x) y = self.classification(x) return y def ctc(log_probs, target, input_lengths, target_lengths, blank=0): loss = nn.CTCLoss(blank=blank, zero_infinity=True) ctc_loss = loss(log_probs, target, input_lengths, target_lengths) return ctc_loss model = Ocr(len(vocab)).to(device) !pip install torch_summary from torchsummary import summary summary(model, torch.zeros((1,1,32,128)).to(device)) def train_batch(data, model, optimizer, criterion): model.train() imgs, targets, label_lens, input_lens, labels = data optimizer.zero_grad() preds = model(imgs) loss = criterion(preds, targets, input_lens, label_lens) loss.backward() optimizer.step() results = trn_ds.evaluate(model, imgs.to(device), labels) return loss, results @torch.no_grad() def validate_batch(data, model): model.eval() imgs, targets, label_lens, input_lens, labels = data preds = model(imgs) loss = criterion(preds, targets, input_lens, label_lens) return loss, val_ds.evaluate(model, imgs.to(device), labels) model = Ocr(len(vocab)).to(device) criterion = ctc optimizer = optim.AdamW(model.parameters(), lr=3e-3) n_epochs = 50 log = Report(n_epochs) for ep in range( n_epochs): # if ep in lr_schedule: optimizer = AdamW(ocr.parameters(), lr=lr_schedule[ep]) N = len(trn_dl) for ix, data in enumerate(trn_dl): pos = ep + 
(ix+1)/N loss, results = train_batch(data, model, optimizer, criterion) # scheduler.step() ca, wa = results['char-accuracy'], results['word-accuracy'] log.record(pos=pos, trn_loss=loss, trn_char_acc=ca, trn_word_acc=wa, end='\r') val_results = [] N = len(val_dl) for ix, data in enumerate(val_dl): pos = ep + (ix+1)/N loss, results = validate_batch(data, model) ca, wa = results['char-accuracy'], results['word-accuracy'] log.record(pos=pos, val_loss=loss, val_char_acc=ca, val_word_acc=wa, end='\r') log.report_avgs(ep+1) print() for jx in range(5): img, label = val_ds.sample() _img = torch.Tensor(val_ds.preprocess(img)[None,None]).to(device) pred = model(_img)[:,0,:] pred = trn_ds.decoder_chars(pred) print(f'Pred: `{pred}` :: Truth: `{label}`') print() log.plot_epochs(['trn_word_acc','val_word_acc'], title='Training and validation word accuracy') ```
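To see what the greedy CTC decoding in `decoder_chars` does (collapse repeated characters, drop the blank index 0), here is a small illustrative check built from the dataset's own character mapping:
```python
# Illustrative check of OCRDataset.decoder_chars: a fake (T, V+1) score tensor whose
# per-step argmax spells blank, 'h', 'h', blank, 'i' should decode to "hi".
import torch

steps = [0, trn_ds.invCharList['h'], trn_ds.invCharList['h'], 0, trn_ds.invCharList['i']]
fake_pred = torch.zeros(len(steps), len(vocab) + 1)
for t, k in enumerate(steps):
    fake_pred[t, k] = 1.0
print(trn_ds.decoder_chars(fake_pred))  # expected: "hi"
```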
github_jupyter
## Dependencies ``` import json, glob from tweet_utility_scripts import * from tweet_utility_preprocess_roberta_scripts_aux import * from transformers import TFRobertaModel, RobertaConfig from tokenizers import ByteLevelBPETokenizer from tensorflow.keras import layers from tensorflow.keras.models import Model ``` # Load data ``` test = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/test.csv') print('Test samples: %s' % len(test)) display(test.head()) ``` # Model parameters ``` input_base_path = '/kaggle/input/276-tweet-train-5fold-roberta-avg-last4-onecy-exp3/' with open(input_base_path + 'config.json') as json_file: config = json.load(json_file) config vocab_path = input_base_path + 'vocab.json' merges_path = input_base_path + 'merges.txt' base_path = '/kaggle/input/qa-transformers/roberta/' # vocab_path = base_path + 'roberta-base-vocab.json' # merges_path = base_path + 'roberta-base-merges.txt' config['base_model_path'] = base_path + 'roberta-base-tf_model.h5' config['config_path'] = base_path + 'roberta-base-config.json' model_path_list = glob.glob(input_base_path + '*.h5') model_path_list.sort() print('Models to predict:') print(*model_path_list, sep='\n') ``` # Tokenizer ``` tokenizer = ByteLevelBPETokenizer(vocab_file=vocab_path, merges_file=merges_path, lowercase=True, add_prefix_space=True) ``` # Pre process ``` test['text'].fillna('', inplace=True) test['text'] = test['text'].apply(lambda x: x.lower()) test['text'] = test['text'].apply(lambda x: x.strip()) x_test, x_test_aux, x_test_aux_2 = get_data_test(test, tokenizer, config['MAX_LEN'], preprocess_fn=preprocess_roberta_test) ``` # Model ``` module_config = RobertaConfig.from_pretrained(config['config_path'], output_hidden_states=True) def model_fn(MAX_LEN): input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids') attention_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask') base_model = TFRobertaModel.from_pretrained(config['base_model_path'], config=module_config, name='base_model') _, _, hidden_states = base_model({'input_ids': input_ids, 'attention_mask': attention_mask}) h12 = hidden_states[-1] h11 = hidden_states[-2] h10 = hidden_states[-3] h09 = hidden_states[-4] avg_hidden = layers.Average()([h12, h11, h10, h09]) logits = layers.Dense(2, use_bias=False, name='qa_outputs')(avg_hidden) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1, name='y_start') end_logits = tf.squeeze(end_logits, axis=-1, name='y_end') model = Model(inputs=[input_ids, attention_mask], outputs=[start_logits, end_logits]) return model ``` # Make predictions ``` NUM_TEST_IMAGES = len(test) test_start_preds = np.zeros((NUM_TEST_IMAGES, config['MAX_LEN'])) test_end_preds = np.zeros((NUM_TEST_IMAGES, config['MAX_LEN'])) for model_path in model_path_list: print(model_path) model = model_fn(config['MAX_LEN']) model.load_weights(model_path) test_preds = model.predict(get_test_dataset(x_test, config['BATCH_SIZE'])) test_start_preds += test_preds[0] test_end_preds += test_preds[1] ``` # Post process ``` test['start'] = test_start_preds.argmax(axis=-1) test['end'] = test_end_preds.argmax(axis=-1) test['selected_text'] = test.apply(lambda x: decode(x['start'], x['end'], x['text'], config['question_size'], tokenizer), axis=1) # Post-process test["selected_text"] = test.apply(lambda x: ' '.join([word for word in x['selected_text'].split() if word in x['text'].split()]), axis=1) test['selected_text'] = test.apply(lambda x: x['text'] if (x['selected_text'] == 
'') else x['selected_text'], axis=1) test['selected_text'].fillna(test['text'], inplace=True) ``` # Visualize predictions ``` test['text_len'] = test['text'].apply(lambda x : len(x)) test['label_len'] = test['selected_text'].apply(lambda x : len(x)) test['text_wordCnt'] = test['text'].apply(lambda x : len(x.split(' '))) test['label_wordCnt'] = test['selected_text'].apply(lambda x : len(x.split(' '))) test['text_tokenCnt'] = test['text'].apply(lambda x : len(tokenizer.encode(x).ids)) test['label_tokenCnt'] = test['selected_text'].apply(lambda x : len(tokenizer.encode(x).ids)) test['jaccard'] = test.apply(lambda x: jaccard(x['text'], x['selected_text']), axis=1) display(test.head(10)) display(test.describe()) ``` # Test set predictions ``` submission = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/sample_submission.csv') submission['selected_text'] = test['selected_text'] submission.to_csv('submission.csv', index=False) submission.head(10) ```
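One caveat with taking the `argmax` of the start and end logits independently, as done above, is that it can produce an end index before the start index. A common refinement, sketched below with NumPy purely as an illustration (it is not what this notebook submits), is to score all valid `(start, end)` pairs jointly:
```python
import numpy as np

def best_valid_span(start_logits, end_logits, max_answer_len=None):
    """Pick the (start, end) pair with the highest summed score, subject to start <= end."""
    scores = start_logits[:, None] + end_logits[None, :]  # scores[i, j] scores span i..j
    mask = np.triu(np.ones_like(scores, dtype=bool))       # keep only j >= i
    if max_answer_len is not None:
        mask &= ~np.triu(np.ones_like(scores, dtype=bool), k=max_answer_len)
    scores = np.where(mask, scores, -np.inf)
    start, end = np.unravel_index(np.argmax(scores), scores.shape)
    return int(start), int(end)

# e.g. for the first test example:
# start, end = best_valid_span(test_start_preds[0], test_end_preds[0])
```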
github_jupyter
``` import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import numpy as np import statsmodels.formula.api as smf import statsmodels.api as sm from statsmodels.graphics.regressionplots import influence_plot import sklearn startup=pd.read_csv("50_Startups.csv") startup startup.describe() startup.head() startup.info() startup1=startup.rename({'R&D Spend':'RDS','Administration':'ADMS','Marketing Spend':'MKTS'},axis=1) startup1 startup1[startup1.duplicated()] startup.corr() sns.set_style(style='darkgrid') sns.pairplot(startup1) model=smf.ols("Profit~RDS+ADMS+MKTS",data=startup1).fit() model.params model.tvalues , np.round(model.pvalues,5) (model.rsquared , model.rsquared_adj) slr_a=smf.ols("Profit~ADMS",data=startup1).fit() slr_a.tvalues , slr_a.pvalues slr_m=smf.ols("Profit~MKTS",data=startup1).fit() slr_m.tvalues , slr_m.pvalues mlr_am=smf.ols("Profit~ADMS+MKTS",data=startup1).fit() mlr_am.tvalues , mlr_am.pvalues rsq_r=smf.ols("RDS~ADMS+MKTS",data=startup1).fit().rsquared vif_r=1/(1-rsq_r) rsq_a=smf.ols("ADMS~RDS+MKTS",data=startup1).fit().rsquared vif_a=1/(1-rsq_a) rsq_m=smf.ols("MKTS~RDS+ADMS",data=startup1).fit().rsquared vif_m=1/(1-rsq_m) # Putting the values in Dataframe format d1={'Variables':['RDS','ADMS','MKTS'],'VIF':[vif_r,vif_a,vif_m]} Vif_df=pd.DataFrame(d1) Vif_df sm.qqplot(model.resid,line='q') plt.title("Normal Q-Q plot of residuals") plt.show() sm.qqplot(model.resid,line='q') plt.title("Normal Q-Q plot of residuals") plt.show() list(np.where(model.resid<-30000)) def standard_values(vals) : return (vals-vals.mean())/vals.std() plt.scatter(standard_values(model.fittedvalues),standard_values(model.resid)) plt.title('Residual Plot') plt.xlabel('standardized fitted values') plt.ylabel('standardized residual values') plt.show() fig=plt.figure(figsize=(15,8)) sm.graphics.plot_regress_exog(model,'RDS',fig=fig) plt.show() fig=plt.figure(figsize=(15,8)) sm.graphics.plot_regress_exog(model,'ADMS',fig=fig) plt.show() fig=plt.figure(figsize=(15,8)) sm.graphics.plot_regress_exog(model,'MKTS',fig=fig) plt.show() (c,_)=model.get_influence().cooks_distance c fig=plt.figure(figsize=(20,7)) plt.stem(np.arange(len(startup1)),np.round(c,5)) plt.xlabel('Row Index') plt.ylabel('Cooks Distance') plt.show() np.argmax(c) , np.max(c) influence_plot(model) plt.show() k=startup1.shape[1] n=startup1.shape[0] leverage_cutoff = (3*(k+1))/n leverage_cutoff startup1[startup1.index.isin([49])] startup2=startup1.drop(startup1.index[[49]],axis=0).reset_index(drop=True) startup2 model2 = smf.ols("Profit~RDS+ADMS+MKTS",data=startup2).fit() sm.graphics.plot_partregress_grid(model) model2=smf.ols("Profit~RDS+ADMS+MKTS",data=startup2).fit() (c,_)=model2.get_influence().cooks_distance c np.argmax(c) , np.max(c) startup2=startup2.drop(startup2.index[[np.argmax(c)]],axis=0).reset_index(drop=True) startup2 final_model=smf.ols("Profit~RDS+ADMS+MKTS",data=startup2).fit() final_model.rsquared , final_model.aic print("model accuracy is improved to",final_model.rsquared) final_model.rsquared startup2 new_data=pd.DataFrame({'RDS':70000,"ADMS":90000,"MKTS":140000},index=[0]) new_data final_model.predict(new_data) pred_y=final_model.predict(startup2) pred_y df={'Prep_Models':['Model','Final_Model'],'Rsquared':[model.rsquared,final_model.rsquared]} table=pd.DataFrame(df) print('FINAL MODEL :-') table ```
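The VIF values above are built by hand from three auxiliary regressions. For reference, statsmodels ships an equivalent helper, `variance_inflation_factor`; the short sketch below reproduces the same diagnostics for `startup1`.
```python
# Equivalent VIF computation using statsmodels' built-in helper
from statsmodels.stats.outliers_influence import variance_inflation_factor

exog = sm.add_constant(startup1[['RDS', 'ADMS', 'MKTS']])
vif_table = pd.DataFrame({
    'Variables': exog.columns,
    'VIF': [variance_inflation_factor(exog.values, i) for i in range(exog.shape[1])],
})
vif_table[vif_table['Variables'] != 'const']  # drop the intercept row
```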
github_jupyter
## Importing Modules ``` #%matplotlib notebook from tqdm import tqdm %matplotlib inline #Module to handle regular expressions import re #manage files import os #Library for emoji import emoji #Import pandas and numpy to handle data import pandas as pd import numpy as np #import libraries for accessing the database import psycopg2 from sqlalchemy import create_engine from postgres_credentials import * #import libraries for visualization import matplotlib.pyplot as plt import seaborn as sns from wordcloud import WordCloud from PIL import Image #Import nltk to check english lexicon import nltk from nltk.tokenize import word_tokenize from nltk.corpus import ( wordnet, stopwords ) #import libraries for tokenization and ML import json; import keras; import keras.preprocessing.text as kpt; #from keras.preprocessing.text import Tokenizer; import sklearn from sklearn.preprocessing import Normalizer from sklearn.feature_extraction.text import ( CountVectorizer, TfidfVectorizer ) from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score #Import all libraries for creating a deep neural network #Sequential is the standard type of neural network with stackable layers from keras.models import ( Sequential, model_from_json ) #Dense: Standard layers with every node connected, dropout: avoids overfitting from keras.layers import Dense, Dropout, Activation; #To anotate database from pycorenlp import StanfordCoreNLP #Querying the database def query_database(tabletweets): engine = create_engine("postgresql+psycopg2://%s:%s@%s:%d/%s" %(usertwitter, passwordtwitter, hosttwitter, porttwitter, dbnametwitter)) table = pd.read_sql_query("select * from %s" %tabletweets,con=engine, index_col="id") return table ``` ## Preprocessing the text Before we dig into analyzing the public opinion on 'Avengers', there is an important step that we need to take: preprocessing the tweet text. But what does this mean? Text preprocessing includes a basic text cleaning following a set of simple rules commonly used but also, advanced techniques that takes into account syntactic and lexical information. ``` #preprocess text in tweets by removing links, @UserNames, blank spaces, etc. 
def preprocessing_text(table): #put everythin in lowercase table['tweet'] = table['tweet'].str.lower() #Replace rt indicating that was a retweet table['tweet'] = table['tweet'].str.replace('rt', '') #Replace occurences of mentioning @UserNames table['tweet'] = table['tweet'].replace(r'@\w+', '', regex=True) #Replace links contained in the tweet table['tweet'] = table['tweet'].replace(r'http\S+', '', regex=True) table['tweet'] = table['tweet'].replace(r'www.[^ ]+', '', regex=True) #remove numbers table['tweet'] = table['tweet'].replace(r'[0-9]+', '', regex=True) #replace special characters and puntuation marks table['tweet'] = table['tweet'].replace(r'[!"#$%&()*+,-./:;<=>?@[\]^_`{|}~]', '', regex=True) return table #Replace elongated words by identifying those repeated characters and then remove them and compare the new word with the english lexicon def in_dict(word): if wordnet.synsets(word): #if the word is in the dictionary, we'll return True return True def replace_elongated_word(word): regex = r'(\w*)(\w+)\2(\w*)' repl = r'\1\2\3' if in_dict(word): return word new_word = re.sub(regex, repl, word) if new_word != word: return replace_elongated_word(new_word) else: return new_word def detect_elongated_words(row): regexrep = r'(\w*)(\w+)(\2)(\w*)' words = [''.join(i) for i in re.findall(regexrep, row)] for word in words: if not in_dict(word): row = re.sub(word, replace_elongated_word(word), row) return row def stop_words(table): #We need to remove the stop words stop_words_list = stopwords.words('english') table['tweet'] = table['tweet'].str.lower() table['tweet'] = table['tweet'].apply(lambda x: ' '.join([word for word in x.split() if word not in (stop_words_list)])) return table def replace_antonyms(word): #We get all the lemma for the word for syn in wordnet.synsets(word): for lemma in syn.lemmas(): #if the lemma is an antonyms of the word if lemma.antonyms(): #we return the antonym return lemma.antonyms()[0].name() return word def handling_negation(row): #Tokenize the row words = word_tokenize(row) speach_tags = ['JJ', 'JJR', 'JJS', 'NN', 'VB', 'VBD', 'VBG', 'VBN', 'VBP'] #We obtain the type of words that we have in the text, we use the pos_tag function tags = nltk.pos_tag(words) #Now we ask if we found a negation in the words tags_2 = '' if "n't" in words and "not" in words: tags_2 = tags[min(words.index("n't"), words.index("not")):] words_2 = words[min(words.index("n't"), words.index("not")):] words = words[:(min(words.index("n't"), words.index("not")))+1] elif "n't" in words: tags_2 = tags[words.index("n't"):] words_2 = words[words.index("n't"):] words = words[:words.index("n't")+1] elif "not" in words: tags_2 = tags[words.index("not"):] words_2 = words[words.index("not"):] words = words[:words.index("not")+1] for index, word_tag in enumerate(tags_2): if word_tag[1] in speach_tags: words = words+[replace_antonyms(word_tag[0])]+words_2[index+2:] break return ' '.join(words) def cleaning_table(table): #This function will process all the required cleaning for the text in our tweets table = preprocessing_text(table) table['tweet'] = table['tweet'].apply(lambda x: detect_elongated_words(x)) table['tweet'] = table['tweet'].apply(lambda x: handling_negation(x)) table = stop_words(table) return table ``` ## Data Visualization After we have cleaned our data but before we start building our model for sentiment analysis, we can perform an exploratory data analysis to see what are the most frequent words that appear in our 'Avengers' tweets. 
For this part, we will show graphs regarding tweets labelled as positive separated from those labelled as negative. ``` #Vectorization for Data Visualization def vectorization(table): #CountVectorizer will convert a collection of text documents to a matrix of token counts #Produces a sparse representation of the counts #Initialize vector = CountVectorizer() #We fit and transform the vector created frequency_matrix = vector.fit_transform(table.tweet) #Sum all the frequencies for each word sum_frequencies = np.sum(frequency_matrix, axis=0) #Now we use squeeze to remove single-dimensional entries from the shape of an array that we got from applying np.asarray to #the sum of frequencies. frequency = np.squeeze(np.asarray(sum_frequencies)) #Now we get into a dataframe all the frequencies and the words that they correspond to frequency_df = pd.DataFrame([frequency], columns=vector.get_feature_names()).transpose() return frequency_df def word_cloud(tweets): #We get the directory that we are working on file = os.getcwd() #We read the mask image into a numpy array avengers_mask = np.array(Image.open(os.path.join(file, "avengers.png"))) #Now we store the tweets into a series to be able to process #tweets_list = pd.Series([t for t in tweet_table.tweet]).str.cat(sep=' ') #We generate the wordcloud using the series created and the mask word_cloud = WordCloud(width=2000, height=1000, max_font_size=200, background_color="black", max_words=2000, mask=avengers_mask, contour_width=1, contour_color="steelblue", colormap="nipy_spectral", stopwords=["avengers"]) word_cloud.generate(tweets) #wordcloud = WordCloud(width=1600, height=800,max_font_size=200).generate(tweets_list) #Now we plot both figures, the wordcloud and the mask #plt.figure(figsize=(15,15)) plt.figure(figsize=(10,10)) plt.imshow(word_cloud, interpolation="hermite") plt.axis("off") #plt.imshow(avengers_mask, cmap=plt.cm.gray, interpolation="bilinear") #plt.axis("off") plt.show() def graph(word_frequency, sent): labels = word_frequency[0][1:51].index title = "Word Frequency for %s" %sent #Plot the figures plt.figure(figsize=(10,5)) plt.bar(np.arange(50), word_frequency[0][1:51], width = 0.8, color = sns.color_palette("bwr"), alpha=0.5, edgecolor = "black", capsize=8, linewidth=1); plt.xticks(np.arange(50), labels, rotation=90, size=14); plt.xlabel("50 more frequent words", size=14); plt.ylabel("Frequency", size=14); #plt.title('Word Frequency for %s', size=18) %sent; plt.title(title, size=18) plt.grid(False); plt.gca().spines["top"].set_visible(False); plt.gca().spines["right"].set_visible(False); plt.show() def regression_graph(table): table = table[1:] #We set the style of seaborn sns.set_style("whitegrid") #Initialize the figure plt.figure(figsize=(6,6)) #we obtain the points from matplotlib scatter points = plt.scatter(table["Positive"], table["Negative"], c=table["Positive"], s=75, cmap="bwr") #graph the colorbar plt.colorbar(points) #we graph the regplot from seaborn sns.regplot(x="Positive", y="Negative",fit_reg=False, scatter=False, color=".1", data=table) plt.xlabel("Frequency for Positive Tweets", size=14) plt.ylabel("Frequency for Negative Tweets", size=14) plt.title("Word frequency in Positive vs. Negative Tweets", size=14) plt.grid(False) sns.despine() ``` ## Preparing data for model After visualizing our data, the next step is to split our dataset into training and test sets. For doing so, we'll take advantage of the train_test_split functionality of sklearn package. 
We will take 20% of the dataset for testing following the 20–80% rule. From the remaining 80% used for the training set, we'll save a part for validation of our model. ``` #Split Data into training and test dataset def splitting(table): X_train, X_test, y_train, y_test = train_test_split(table.tweet, table.sentiment, test_size=0.2, shuffle=True) return X_train, X_test, y_train, y_test ``` m ``` #Tokenization for analysis def tokenization_tweets(dataset, features): tokenization = TfidfVectorizer(max_features=features) tokenization.fit(dataset) dataset_transformed = tokenization.transform(dataset).toarray() return dataset_transformed ``` ## Train model ``` #Create a Neural Network #Create the model def train(X_train_mod, y_train, features, shuffle, drop, layer1, layer2, epoch, lr, epsilon, validation): model_nn = Sequential() model_nn.add(Dense(layer1, input_shape=(features,), activation='relu')) model_nn.add(Dropout(drop)) model_nn.add(Dense(layer2, activation='sigmoid')) model_nn.add(Dropout(drop)) model_nn.add(Dense(3, activation='softmax')) optimizer = keras.optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=epsilon, decay=0.0, amsgrad=False) model_nn.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) model_nn.fit(np.array(X_train_mod), y_train, batch_size=32, epochs=epoch, verbose=1, validation_split=validation, shuffle=shuffle) return model_nn ``` ## Test model ``` def test(X_test, model_nn): prediction = model_nn.predict(X_test) return prediction ``` ## Main code ``` if __name__ == "__main__": tabletweets = "tweets_avengers" tweet_table = query_database(tabletweets) tweet_table = cleaning_table(tweet_table) if __name__ == "__main__": #First we draw a word cloud #For All tweets word_cloud(pd.Series([t for t in tweet_table.tweet]).str.cat(sep=' ')) #For positive tweets word_cloud(pd.Series([t for t in tweet_table[tweet_table.sentiment == "Positive"].tweet]).str.cat(sep=' ')) #For negative tweets word_cloud(pd.Series([t for t in tweet_table[tweet_table.sentiment == "Negative"].tweet]).str.cat(sep=' ')) if __name__ == "__main__": #Get the frequency word_frequency = vectorization(tweet_table).sort_values(0, ascending = False) word_frequency_pos = vectorization(tweet_table[tweet_table['sentiment'] == 'Positive']).sort_values(0, ascending = False) word_frequency_neg = vectorization(tweet_table[tweet_table['sentiment'] == 'Negative']).sort_values(0, ascending = False) #Graph with frequency words all, positive and negative tweets and get the frequency graph(word_frequency, 'all') graph(word_frequency_pos, 'positive') graph(word_frequency_neg, 'negative') if __name__ == "__main__": #Concatenate word frequency for positive and negative table_regression = pd.concat([word_frequency_pos, word_frequency_neg], axis=1, sort=False) table_regression.columns = ["Positive", "Negative"] regression_graph(table_regression) if __name__ == "__main__": tabletweets = "tweets_avengers_labeled" tweet_table = query_database(tabletweets) if __name__ == "__main__": tweet_table['sentiment'] = tweet_table['sentiment'].apply(lambda x: 2 if x == 'Positive' else (0 if x == 'Negative' else 1)) if __name__ == "__main__": X_train, X_test, y_train, y_test = splitting(tweet_table) def model1(X_train, y_train): features = 3500 shuffle = True drop = 0.5 layer1 = 512 layer2 = 256 epoch = 5 lr = 0.001 epsilon = None validation = 0.1 X_train_mod = tokenization_tweets(X_train, features) model = train(X_train_mod, y_train, features, shuffle, drop, layer1, layer2, epoch, lr, epsilon, 
validation) return model; model1(X_train, y_train) def model2(X_train, y_train): features = 3000 shufle = True drop = 0.5 layer1 = 512 layer2 = 256 epoch = 5 lr = 0.001 epsilon = None validation = 0.1 X_train_mod = tokenization_tweets(X_train, features) model = train(X_train_mod, y_train, features, shufle, drop, layer1, layer2, epoch, lr, epsilon, validation) return model; model2(X_train, y_train) def model3(X_train, y_train): features = 3500 shufle = True drop = 0.5 layer1 = 512 layer2 = 256 epoch = 5 lr = 0.002 epsilon = None validation = 0.1 X_train_mod = tokenization_tweets(X_train, features) model = train(X_train_mod, y_train, features, shufle, drop, layer1, layer2, epoch, lr, epsilon, validation) return model; model_final = model3(X_train, y_train) def model4(X_train, y_train): features = 5000 shufle = True drop = 0.5 layer1 = 512 layer2 = 256 epoch = 2 lr = 0.005 epsilon = None validation = 0.1 X_train_mod = tokenization_tweets(X_train, features) model = train(X_train_mod, y_train, features, shufle, drop, layer1, layer2, epoch, lr, epsilon, validation) return model; model4(X_train, y_train) def model5(X_train, y_train): features = 3500 shufle = True drop = 0.5 layer1 = 512 layer2 = 256 epoch = 5 lr = 0.002 epsilon = 1e-5 validation = 0.1 X_train_mod = tokenization_tweets(X_train, features) model = train(X_train_mod, y_train, features, shufle, drop, layer1, layer2, epoch, lr, epsilon, validation) return model; model5(X_train, y_train) def model6(X_train, y_train): features = 3500 shufle = True drop = 0.5 layer1 = 512 layer2 = 256 epoch = 5 lr = 0.002 epsilon = 1e-8 validation = 0.1 X_train_mod = tokenization_tweets(X_train, features) model = train(X_train_mod, y_train, features, shufle, drop, layer1, layer2, epoch, lr, epsilon, validation) return model; model6(X_train, y_train) def model7(X_train, y_train): features = 3500 shufle = True drop = 0.5 layer1 = 512 layer2 = 256 epoch = 6 lr = 0.002 epsilon = 1e-8 validation = 0.1 X_train_mod = tokenization_tweets(X_train, features) model = train(X_train_mod, y_train, features, shufle, drop, layer1, layer2, epoch, lr, epsilon, validation) return model; #model7(X_train, y_train) def model8(X_train, y_train): features = 3500 shufle = True drop = 0.5 layer1 = 512 layer2 = 256 epoch = 5 lr = 0.002 epsilon = 1e-9 validation = 0.1 X_train_mod = tokenization_tweets(X_train, features) model = train(X_train_mod, y_train, features, shufle, drop, layer1, layer2, epoch, lr, epsilon, validation) return model; model8(X_train, y_train) def model9(X_train, y_train): features = 3500 shufle = False drop = 0.5 layer1 = 512 layer2 = 256 epoch = 5 lr = 0.002 epsilon = 1e-9 validation = 0.1 X_train_mod = tokenization_tweets(X_train, features) model = train(X_train_mod, y_train, features, shufle, drop, layer1, layer2, epoch, lr, epsilon, validation) return model; model9(X_train, y_train) def model10(X_train, y_train): features = 3500 shufle = True drop = 0.5 layer1 = 512 layer2 = 256 epoch = 5 lr = 0.002 epsilon = 1e-9 validation = 0.2 X_train_mod = tokenization_tweets(X_train, features) model = train(X_train_mod, y_train, features, shufle, drop, layer1, layer2, epoch, lr, epsilon, validation) return model; model10(X_train, y_train) def model11(X_train, y_train): features = 3000 shufle = True drop = 0.5 layer1 = 512 layer2 = 256 epoch = 5 lr = 0.002 epsilon = 1e-9 validation = 0.2 X_train_mod = tokenization_tweets(X_train, features) model = train(X_train_mod, y_train, features, shufle, drop, layer1, layer2, epoch, lr, epsilon, validation) return model; 
model11(X_train, y_train) def save_model(model): model_json = model.to_json() with open('model.json', 'w') as json_file: json_file.write(model_json) model.save_weights('model.h5') model_final = model7(X_train, y_train) save_model(model_final) if __name__ == "__main__": tabletweetsnew = "tweets_predict_avengers" tweet_table_new = query_database(tabletweetsnew) tweet_table_new = cleaning_table(tweet_table_new) if __name__ == "__main__": X_new = tokenization_tweets(tweet_table_new.tweet, 3500) new_prediction = model_final.predict(X_new) if __name__ == "__main__": labels = ['Negative', 'Neutral', 'Positive'] sentiments = [labels[np.argmax(pred)] for pred in new_prediction] tweet_table_new["sentiment"] = sentiments sizes = [sentiments.count('Negative'), sentiments.count('Neutral'), sentiments.count('Positive')] explode = (0, 0, 0.1) labels = 'Negative', 'Neutral', 'Positive' plt.figure(figsize=(5,5)) plt.pie(sizes, explode=explode, colors="bwr", labels=labels, autopct='%1.1f%%', shadow=True, startangle=90, wedgeprops={'alpha':0.8}) plt.axis('equal') plt.show() if __name__ == "__main__": engine = create_engine("postgresql+psycopg2://%s:%s@%s:%d/%s" %(usertwitter, passwordtwitter, hosttwitter, porttwitter, dbnametwitter)) tweet_table_new.to_sql("tweets_avengers_new_labeled", con=engine, if_exists="append") ``` ### Extra analysis for interaction network ``` if __name__ == "__main__": tweet_table_interaction = pd.read_csv("tweets_final.csv") tweet_table_interaction.rename(columns = {"text": "tweet"}, inplace=True) tweet_table_interaction = cleaning_table(tweet_table_interaction) X_interaction = tokenization_tweets(tweet_table_interaction.tweet, 3500) if __name__ == "__main__": # Open json file of saved model json_file = open('model.json', 'r') loaded_model_json = json_file.read() json_file.close() # Create a model model = model_from_json(loaded_model_json) # Weight nodes with saved values model.load_weights('model.h5') if __name__ == "__main__": int_prediction = model.predict(X_interaction) labels = ['Negative', 'Neutral', 'Positive'] sentiments = [labels[np.argmax(pred)] for pred in int_prediction] tweet_table_interaction["sentiment"] = sentiments tweet_table_interaction.to_csv("tweets_final_sentiment.csv") ```
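Before moving on, one gap worth noting: the notebook splits off `X_test`/`y_test` but never scores the trained network on them, and `tokenization_tweets` refits the TF-IDF vectorizer on whatever text it is given, so train and test features do not share a vocabulary. The sketch below is one possible way to close that loop; it is not part of the original pipeline, it assumes `X_train`, `X_test`, `y_train`, `y_test` and `train()` from the cells above, and it fits the vectorizer on the training tweets only.

```
# Hedged sketch: evaluate on the held-out test split with a vectorizer fitted on
# the training tweets only (refitting on the test set would give mismatched features).
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, classification_report

vectorizer = TfidfVectorizer(max_features=3500)
X_train_mod = vectorizer.fit_transform(X_train).toarray()   # fit vocabulary on training tweets
X_test_mod = vectorizer.transform(X_test).toarray()         # reuse the same vocabulary for test tweets

features = X_train_mod.shape[1]                             # may be < 3500 for small vocabularies
model = train(X_train_mod, y_train, features, True, 0.5, 512, 256, 5, 0.002, 1e-8, 0.1)

y_pred = np.argmax(model.predict(X_test_mod), axis=1)       # most likely class per tweet
print("Accuracy:", accuracy_score(y_test, y_pred))
print(classification_report(y_test, y_pred, target_names=["Negative", "Neutral", "Positive"]))
```

The same idea applies to the saved model: persisting the fitted vectorizer (for example with `pickle`) alongside `model.json`/`model.h5` keeps the feature columns aligned when new tweets are scored later.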
## Load Estonian weather service - https://www.ilmateenistus.ee/teenused/ilmainfo/ilmatikker/ ``` import requests import datetime import xml.etree.ElementTree as ET import pandas as pd from pandas.api.types import is_string_dtype from pandas.api.types import is_numeric_dtype import geopandas as gpd import fiona from fiona.crs import from_epsg import numpy as np from shapely.geometry import Point import matplotlib.pyplot as plt %matplotlib inline req = requests.get("http://www.ilmateenistus.ee/ilma_andmed/xml/observations.php") print(req.encoding) print(req.headers['content-type']) tree = ET.fromstring(req.content.decode(req.encoding) ) print(tree.tag) print(tree.attrib) ts = tree.attrib['timestamp'] print(datetime.datetime.fromtimestamp(int(ts))) data = {'stations' : [], 'wmocode': [], 'precipitations': [], 'airtemperature': [], 'windspeed': [], 'waterlevel': [], 'watertemperature': [], 'geometry': [] } counter = 0 for station in tree.findall('station'): counter += 1 # print(station.tag, child.attrib) # < name > Virtsu < /name > – jaama nimi. name = station.find('name').text data['stations'].append(name) # < wmocode > 26128 < /wmocode > – jaama WMO kood. wmocode = station.find('wmocode').text data['wmocode'].append(wmocode) try: # < longitude > 23.51355555534363 < /longitude > – jaama asukoha koordinaat. lon = station.find('longitude').text # < latitude > 58.572674999100215 < /latitude > – jaama asukoha koordinaat. lat = station.find('latitude').text coords = Point(float(lon), float(lat)) data['geometry'].append(coords) except ValueError as ve: pass # < phenomenon > Light snowfall < /phenomenon > – jaamas esinev ilmastikunähtus, selle puudumisel pilvisuse aste (kui jaamas tehakse manuaalseid pilvisuse mõõtmisi). Täielik nimekiri nähtustest on allpool olevas tabelis. # < visibility > 34.0 < /visibility > – nähtavus (km). # < precipitations > 0 < /precipitations > – sademed (mm) viimase tunni jooksul. Lume, lörtsi, rahe ja teiste taoliste sademete hulk on samuti esitatud vee millimeetritena. 1 cm lund ~ 1 mm vett. precip = station.find('precipitations').text data['precipitations'].append(precip) # < airpressure > 1005.4 < /airpressure > – õhurõhk (hPa). Normaalrõhk on 1013.25 hPa. # < relativehumidity > 57 < /relativehumidity > – suhteline õhuniiskus (%). # < airtemperature > -3.6 < /airtemperature > – õhutemperatuur (°C). temp = station.find('airtemperature').text data['airtemperature'].append(temp) # < winddirection > 101 < /winddirection > – tuule suund (°). # < windspeed > 3.2 < /windspeed > – keskmine tuule kiirus (m/s). wind = station.find('windspeed').text data['windspeed'].append(wind) # < windspeedmax > 5.1 < /windspeedmax > – maksimaalne tuule kiirus ehk puhangud (m/s). 
# < waterlevel > -49 < /waterlevel > – veetase (cm Kroonlinna nulli suhtes) waterlevel = station.find('waterlevel').text data['waterlevel'].append(waterlevel) # < waterlevel_eh2000 > -28 < waterlevel_eh2000/ > – veetase (cm Amsterdami nulli suhtes) # waterlevel_eh2000 = station.find('waterlevel_eh2000').text # < watertemperature > -0.2 < /watertemperature > – veetemperatuur (°C) watertemp = station.find('watertemperature').text data['watertemperature'].append(watertemp) print(counter) df = pd.DataFrame(data) for field in ['precipitations','airtemperature','windspeed','waterlevel','watertemperature']: if field in df.columns: if is_string_dtype(df[field]): df[field] = df[field].astype(float) display(df.head(5)) geo_df = gpd.GeoDataFrame(df, crs=from_epsg(4326), geometry='geometry') geo_df.plot() water_df = geo_df.dropna(subset=['precipitations']) water_df.plot(column='precipitations', legend=True) geo_df_3301 = geo_df.dropna(subset=['precipitations']).to_crs(epsg=3301) geo_df_3301['x'] = geo_df_3301['geometry'].apply(lambda p: p.x) geo_df_3301['y'] = geo_df_3301['geometry'].apply(lambda p: p.y) display(geo_df_3301.head(5)) geo_df_3301.to_file('ilmateenistus_precip_stations.shp', encoding='utf-8') ``` ## IDW in Python from scratch blogpost https://www.geodose.com/2019/09/creating-idw-interpolation-from-scratch-python.html - IDW Algorithm Implementation in Python - IDW Interpolation Algorithm Based on Block Radius Sampling Point - IDW Interpolation based on Minimum Number of Sampling Point ``` geo_df_3301.dtypes from idw_basic import idw_rblock, idw_npoint x_idw_list1, y_idw_list1, z_head1 = idw_rblock(x=geo_df_3301['x'].astype(float).values.tolist(), y=geo_df_3301['y'].astype(float).values.tolist(), z=geo_df_3301['precipitations'].values.tolist(), grid_side_length=200, search_radius=50000, p=1.5) display(len(x_idw_list1)) display(len(y_idw_list1)) display(len(z_head1)) display(np.array(z_head1).shape) plt.matshow(z_head1, origin='lower') plt.colorbar() plt.show() ``` _idw_npoint_ might take very long, due to ierative search radius increase to find at least n nearest neighbours ``` x_idw_list2, y_idw_list2, z_head2 = idw_npoint(x=geo_df_3301['x'].astype(float).values.tolist(), y=geo_df_3301['y'].astype(float).values.tolist(), z=geo_df_3301['airtemperature'].values.tolist(), grid_side_length=100, n_points=3, p=1.5, rblock_iter_distance=50000) display(len(x_idw_list2)) display(len(y_idw_list2)) display(len(z_head2)) display(np.array(z_head2).shape) plt.matshow(z_head2, origin='lower') plt.colorbar() plt.show() ``` ## Inverse distance weighting (IDW) in Python with a KDTree By Copyright (C) 2016 Paul Brodersen <paulbrodersen+idw@gmail.com> under GPL-3.0 code: https://github.com/paulbrodersen/inverse_distance_weighting Inverse distance weighting is an interpolation method that computes the score of query points based on the scores of their k-nearest neighbours, weighted by the inverse of their distances. As each query point is evaluated using the same number of data points, this method allows for strong gradient changes in regions of high sample density while imposing smoothness in data sparse regions. 
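Before the library-based version, it may help to see what this boils down to: the sketch below is a simplified, self-contained stand-in for a KD-tree k-nearest-neighbour IDW (it is not the `idw_knn` module itself, and the synthetic points at the end exist only to make it runnable on its own). The actual module and its dependencies follow.

```
# Minimal k-nearest-neighbour IDW sketch using scipy's cKDTree (illustrative only).
import numpy as np
from scipy.spatial import cKDTree

def idw_knn_interpolate(obs_xy, obs_z, query_xy, k=6, p=2.0, eps=1e-12):
    tree = cKDTree(obs_xy)                           # index the observation locations
    dist, idx = tree.query(query_xy, k=k)            # k nearest observations per query point
    weights = 1.0 / np.maximum(dist, eps) ** p       # inverse-distance weights (eps guards against /0)
    weights /= weights.sum(axis=1, keepdims=True)    # normalise weights per query point
    return np.sum(weights * obs_z[idx], axis=1)      # weighted average of neighbour values

# Tiny synthetic example, just to show the call pattern
rng = np.random.default_rng(0)
obs_xy = rng.uniform(0, 100, size=(50, 2))
obs_z = np.sin(obs_xy[:, 0] / 10.0) + rng.normal(0, 0.1, 50)
query_xy = rng.uniform(0, 100, size=(5, 2))
print(idw_knn_interpolate(obs_xy, obs_z, query_xy))
```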
uses: - numpy - scipy.spatial (for cKDTree) ``` import numpy as np import matplotlib.pyplot as plt %matplotlib inline import idw_knn XY_obs_coords = np.vstack([geo_df_3301['x'].values, geo_df_3301['y'].values]).T z_arr = geo_df_3301['precipitations'].values display(XY_obs_coords.shape) display(z_arr.shape) # returns a function that is trained (the tree setup) for the interpolation on the grid idw_tree = idw_knn.tree(XY_obs_coords, z_arr) all_dist_m = geo_df_3301['x'].max() - geo_df_3301['x'].min() dist_km_x = all_dist_m / 1000 display(dist_km_x) all_dist_m_y = geo_df_3301['y'].max() - geo_df_3301['y'].min() dist_km_y = all_dist_m_y / 1000 display(dist_km_y) # prepare grids # number of target interpolation grid shape along x and y axis, e.g. 150*100 raster pixels nx=int(dist_km_x) ny=int(dist_km_y) # preparing the "output" grid x_spacing = np.linspace(geo_df_3301['x'].min(), geo_df_3301['x'].max(), nx) y_spacing = np.linspace(geo_df_3301['y'].min(), geo_df_3301['y'].max(), ny) # preparing the target grid x_y_grid_pairs = np.meshgrid(x_spacing, y_spacing) x_y_grid_pairs_list = np.reshape(x_y_grid_pairs, (2, -1)).T display(f"x_y_grid_pairs {len(x_y_grid_pairs)}") display(f"x_y_grid_pairs_list reshaped {x_y_grid_pairs_list.shape}") # now interpolating onto the target grid z_arr_interp = idw_tree(x_y_grid_pairs_list) display(f"z_arr_interp {z_arr_interp.shape}") # plot fig, (ax1, ax2) = plt.subplots(1,2, sharex=True, sharey=True, figsize=(10,3)) ax1.scatter(XY_obs_coords[:,0], XY_obs_coords[:,1], c=geo_df_3301['precipitations'], linewidths=0) ax1.set_title('Observation samples') ax2.contourf(x_spacing, y_spacing, z_arr_interp.reshape((ny,nx))) ax2.set_title('Interpolation') plt.show() z_arr_interp.shape plt.matshow(z_arr_interp.reshape((ny,nx)), origin='lower') plt.colorbar() plt.show() display(f"x_spacing {x_spacing.shape}") display(f"y_spacing {y_spacing.shape}") # is a x_y_grid_pair a list of two ndarrays, each is fully spatial 100x150 fields, one holds the x coords the other the y coords x_mg = np.meshgrid(x_spacing, y_spacing) display(f"x_mg {type(x_mg)} {len(x_mg)} len0 {type(x_mg[0])} {len(x_mg[0])} {x_mg[0].shape} len1 {type(x_mg[1])} {len(x_mg[1])} {x_mg[0].shape}") # the yget reshaped into two long flattened arrays the joint full list of target x y pairs representing all grid locations x_mg_interp_prep = np.reshape(x_mg, (2, -1)).T display(f"x_mg_interp_prep {type(x_mg_interp_prep)} {len(x_mg_interp_prep)} {x_mg_interp_prep.shape}") ``` ## Interpolation in Python with Radial Basis Function - https://stackoverflow.com/a/3114117 ``` from scipy.interpolate import Rbf def scipy_idw(x, y, z, xi, yi): interp = Rbf(x, y, z, function='linear') return interp(xi, yi) def plot(x,y,z,grid): plt.figure() grid_flipped = np.flipud(grid) plt.imshow(grid, extent=(x.min(), x.max(), y.min(), y.max()), origin='lower') # plt.hold(True) plt.scatter(x,y,c=z) plt.colorbar() # nx, ny = 50, 50 x=geo_df_3301['x'].astype(float).values y=geo_df_3301['y'].astype(float).values z=geo_df_3301['precipitations'].values xi = np.linspace(x.min(), x.max(), nx) yi = np.linspace(y.min(), y.max(), ny) xi, yi = np.meshgrid(xi, yi) xi, yi = xi.flatten(), yi.flatten() grid2 = scipy_idw(x,y,z,xi,yi) grid2 = grid2.reshape((ny, nx)) plot(x,y,z,grid2) plt.title("Scipy's Rbf with function=linear") # plot fig, (ax1, ax2, ax3) = plt.subplots(1,3, sharex=True, sharey=True, figsize=(10,3)) ax1.scatter(x,y, c=z, linewidths=0) ax1.set_title('Observation samples') ax2.contourf(np.linspace(x.min(), x.max(), nx), np.linspace(y.min(), 
y.max(), ny), grid2) ax2.set_title('Interpolation contours') ax3.imshow(np.flipud(grid2), extent=(x.min(), x.max(), y.min(), y.max())) ax3.set_title('RBF pixels') plt.show() ``` ## surface/contour/mesh plotting of interpolated grids https://matplotlib.org/3.1.0/gallery/images_contours_and_fields/pcolormesh_levels.html#sphx-glr-gallery-images-contours-and-fields-pcolormesh-levels-py ``` from matplotlib.colors import BoundaryNorm from matplotlib.ticker import MaxNLocator from matplotlib import cm nbins=15 levels = MaxNLocator(nbins=nbins).tick_values(z_arr_interp.min(), z_arr_interp.max()) # pick the desired colormap, sensible levels, and define a normalization # instance which takes data values and translates those into levels. cmap = plt.get_cmap('viridis') norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True) # plot fig, (ax1, ax2) = plt.subplots(1,2, sharex=True, sharey=True, figsize=(10,3)) im = ax1.pcolormesh(x_idw_list1, y_idw_list1, np.array(z_head1), cmap=cmap, norm=norm) fig.colorbar(im, ax=ax1) ax1.set_title('pcolormesh with normalisation (nbins={})'.format(nbins)) im2 = ax2.pcolormesh(x_idw_list1, y_idw_list1, np.array(z_head1), cmap=cm.viridis) fig.colorbar(im2, ax=ax2) ax2.set_title('pcolormesh without explicit normalisation') plt.show() # plot fig, (ax1, ax2) = plt.subplots(1,2, sharex=True, sharey=True, figsize=(10,3)) cf = ax1.contourf(x_spacing, y_spacing, z_arr_interp.reshape((ny,nx)), levels=levels, cmap=cmap) fig.colorbar(cf, ax=ax1) ax1.set_title('contourf with {} levels'.format(nbins)) cf2 = ax2.contourf(x_spacing, y_spacing, z_arr_interp.reshape((ny,nx)), cmap=cm.viridis) fig.colorbar(cf2, ax=ax2) ax2.set_title('contourf with defaut levels') plt.show() z_arr_interp.reshape((ny,nx)).shape ``` ## Writing interpolated array to a raster file - GeoTiff raster with GDAL Python ``` from fiona.crs import from_epsg import pyproj import osgeo.osr import gdal gdal.UseExceptions() # wkt_projection = CRS("EPSG:3301") -> techniclly should tae crs from the geodataframe crs = pyproj.Proj(from_epsg(3301)) srs = osgeo.osr.SpatialReference() srs.ImportFromProj4(crs.srs) wkt_projection = srs.ExportToWkt() # # KDTree z_arr_interp # ncols = nx nrows = ny cell_unit_sizeX = (geo_df_3301['x'].max() - geo_df_3301['x'].min()) / ncols cell_unit_sizeY = (geo_df_3301['y'].max() - geo_df_3301['y'].min()) / nrows testnp = z_arr_interp.reshape((ny,nx)) xllcorner = geo_df_3301['x'].min() xulcorner = geo_df_3301['x'].min() yllcorner = geo_df_3301['y'].min() yulcorner = geo_df_3301['y'].max() nodata_value = -9999 driver = gdal.GetDriverByName("GTiff") dataset = driver.Create("kdtree_precip_rasterout1.tif", ncols, nrows, 1, gdal.GDT_Float32 ) dataset.SetProjection(wkt_projection) dataset.SetGeoTransform((xulcorner,cell_unit_sizeX,0,yulcorner,0,-cell_unit_sizeY)) dataset.GetRasterBand(1).WriteArray(np.flipud(testnp)) band = dataset.GetRasterBand(1) band.SetNoDataValue(nodata_value) dataset.FlushCache() # dereference band to avoid gotcha described previously band = None dataset = None # # RBF grid2 # testnp = grid2.reshape((ny,nx)) ncols = nx nrows = ny cell_unit_sizeX = (geo_df_3301['x'].max() - geo_df_3301['x'].min()) / ncols cell_unit_sizeY = (geo_df_3301['y'].max() - geo_df_3301['y'].min()) / nrows xllcorner = geo_df_3301['x'].min() xulcorner = geo_df_3301['x'].min() yllcorner = geo_df_3301['y'].min() yulcorner = geo_df_3301['y'].max() nodata_value = -9999 driver = gdal.GetDriverByName("GTiff") dataset = driver.Create("rbf_precip_rasterout1.tif", ncols, nrows, 1, gdal.GDT_Float32 ) 
dataset.SetProjection(wkt_projection) dataset.SetGeoTransform((xulcorner,cell_unit_sizeX,0,yulcorner,0,-cell_unit_sizeY)) dataset.GetRasterBand(1).WriteArray(np.flipud(testnp)) band = dataset.GetRasterBand(1) band.SetNoDataValue(nodata_value) dataset.FlushCache() # dereference band to avoid gotcha described previously band = None dataset = None ncols = 200 nrows = 200 cell_unit_sizeX = (geo_df_3301['x'].max() - geo_df_3301['x'].min()) / ncols cell_unit_sizeY = (geo_df_3301['y'].max() - geo_df_3301['y'].min()) / nrows xllcorner = geo_df_3301['x'].min() xulcorner = geo_df_3301['x'].min() yllcorner = geo_df_3301['y'].min() yulcorner = geo_df_3301['y'].max() nodata_value = -9999 driver = gdal.GetDriverByName("GTiff") # dataset = driver.Create("%s"%(OutputFile), NROWS, NCOLS, 1, gdal.GDT_Float32 ) dataset = driver.Create("idw_basic_precip_rasterout1.tif", ncols, nrows, 1, gdal.GDT_Float32 ) dataset.SetProjection(wkt_projection) dataset.SetGeoTransform((xulcorner,cell_unit_sizeX,0,yulcorner,0,-cell_unit_sizeY)) dataset.GetRasterBand(1).WriteArray(np.flipud(np.array(z_head1))) band = dataset.GetRasterBand(1) band.SetNoDataValue(nodata_value) dataset.FlushCache() # dereference band to avoid gotcha described previously band = None dataset = None ``` ## Point Query RasterStats - https://pythonhosted.org/rasterstats/manual.html#basic-example ``` from rasterstats import point_query xm = gpd.read_file('ilmateenistus_precip_stations.shp', encoding="utf-8") pts_kd = point_query('ilmateenistus_precip_stations.shp', "kdtree_precip_rasterout1.tif") pts_rbf = point_query('ilmateenistus_precip_stations.shp', "rbf_precip_rasterout1.tif") pts_idw = point_query('ilmateenistus_precip_stations.shp', "idw_basic_precip_rasterout1.tif") xm['pcp_kdtree'] = pts_kd xm['pcp_rbf'] = pts_rbf xm['pcp_idw'] = pts_idw xm = xm[['precipitat','pcp_kdtree','pcp_rbf','pcp_idw']].dropna() from sklearn.metrics import mean_squared_error, r2_score x_l = [] for rst in ['pcp_kdtree', 'pcp_rbf', 'pcp_idw']: rmse = np.sqrt(mean_squared_error(xm['precipitat'], xm[rst])) r2 = r2_score(xm['precipitat'], xm[rst]) x_l.append({ 'name': rst, 'rmse': rmse, 'r2': r2}) pd.DataFrame(x_l) ```
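Sampling the rasters back at the very stations that produced them is a fairly gentle check, because interpolators tend to reproduce their own inputs. A leave-one-out cross-validation, where each station is predicted from the remaining stations only, gives a more honest error estimate. The sketch below is not part of the original workflow; it runs a simple k-NN IDW directly on `geo_df_3301` from the cells above, and k=6 and p=2 are illustrative choices.

```
# Leave-one-out cross-validation sketch for a k-NN IDW interpolator.
import numpy as np
from scipy.spatial import cKDTree
from sklearn.metrics import mean_squared_error

xy = geo_df_3301[['x', 'y']].astype(float).values
z = geo_df_3301['precipitations'].values

preds = np.empty_like(z, dtype=float)
for i in range(len(z)):
    mask = np.arange(len(z)) != i                        # drop station i from the sample
    tree = cKDTree(xy[mask])
    dist, idx = tree.query(xy[i], k=min(6, int(mask.sum())))
    w = 1.0 / np.maximum(dist, 1e-12) ** 2               # inverse-distance-squared weights
    preds[i] = np.sum(w * z[mask][idx]) / w.sum()

print("LOO RMSE:", np.sqrt(mean_squared_error(z, preds)))
```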
<a href="https://colab.research.google.com/github/john-s-butler-dit/Numerical-Analysis-Python/blob/master/Chapter%2008%20-%20Heat%20Equations/801_Heat%20Equation-%20FTCS.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # The Explicit Forward Time Centered Space (FTCS) Difference Equation for the Heat Equation #### John S Butler john.s.butler@tudublin.ie [Course Notes](https://johnsbutler.netlify.com/files/Teaching/Numerical_Analysis_for_Differential_Equations.pdf) [Github](https://github.com/john-s-butler-dit/Numerical-Analysis-Python) ## Overview This notebook will implement the explicit Forward Time Centered Space (FTCS) Difference method for the Heat Equation. ## The Heat Equation The Heat Equation is the first order in time ($t$) and second order in space ($x$) Partial Differential Equation [1-3]: \begin{equation} \frac{\partial u}{\partial t} = \frac{\partial^2 u}{\partial x^2},\end{equation} The equation describes heat transfer on a domain \begin{equation} \Omega = \{ t \geq 0\leq x \leq 1\}. \end{equation} with an initial condition at time $t=0$ for all $x$ and boundary condition on the left ($x=0$) and right side ($x=1$). ## Forward Time Centered Space (FTCS) Difference method This notebook will illustrate the Forward Time Centered Space (FTCS) Difference method for the Heat Equation with the __initial conditions__ \begin{equation} u(x,0)=2x, \ \ 0 \leq x \leq \frac{1}{2}, \end{equation} \begin{equation} u(x,0)=2(1-x), \ \ \frac{1}{2} \leq x \leq 1, \end{equation} and __boundary condition__ \begin{equation}u(0,t)=0, u(1,t)=0. \end{equation} ``` # LIBRARY # vector manipulation import numpy as np # math functions import math # THIS IS FOR PLOTTING %matplotlib inline import matplotlib.pyplot as plt # side-stepping mpl backend import warnings warnings.filterwarnings("ignore") ``` ## Discete Grid The region $\Omega$ is discretised into a uniform mesh $\Omega_h$. In the space $x$ direction into $N$ steps giving a stepsize of \begin{equation}h=\frac{1-0}{N},\end{equation} resulting in \begin{equation}x[i]=0+ih, \ \ \ i=0,1,...,N,\end{equation} and into $N_t$ steps in the time $t$ direction giving a stepsize of \begin{equation} k=\frac{1-0}{N_t}\end{equation} resulting in \begin{equation}t[j]=0+jk, \ \ \ j=0,...,15.\end{equation} The Figure below shows the discrete grid points for $N=10$ and $Nt=100$, the known boundary conditions (green), initial conditions (blue) and the unknown values (red) of the Heat Equation. ``` N=10 Nt=1000 h=1/N k=1/Nt r=k/(h*h) time_steps=15 time=np.arange(0,(time_steps+.5)*k,k) x=np.arange(0,1.0001,h) X, Y = np.meshgrid(x, time) fig = plt.figure() plt.plot(X,Y,'ro'); plt.plot(x,0*x,'bo',label='Initial Condition'); plt.plot(np.ones(time_steps+1),time,'go',label='Boundary Condition'); plt.plot(x,0*x,'bo'); plt.plot(0*time,time,'go'); plt.xlim((-0.02,1.02)) plt.xlabel('x') plt.ylabel('time (ms)') plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.title(r'Discrete Grid $\Omega_h,$ h= %s, k=%s'%(h,k),fontsize=24,y=1.08) plt.show(); ``` ## Discrete Initial and Boundary Conditions The discrete initial conditions are \begin{equation} w[i,0]=2x[i], \ \ 0 \leq x[i] \leq \frac{1}{2} \end{equation} \begin{equation}w[i,0]=2(1-x[i]), \ \ \frac{1}{2} \leq x[i] \leq 1 \end{equation} and the discrete boundary conditions are \begin{equation} w[0,j]=0, w[10,j]=0, \end{equation} where $w[i,j]$ is the numerical approximation of $U(x[i],t[j])$. 
The Figure below plots values of $w[i,0]$ for the inital (blue) and boundary (red) conditions for $t[0]=0.$ ``` w=np.zeros((N+1,time_steps+1)) b=np.zeros(N-1) # Initial Condition for i in range (1,N): w[i,0]=2*x[i] if x[i]>0.5: w[i,0]=2*(1-x[i]) # Boundary Condition for k in range (0,time_steps): w[0,k]=0 w[N,k]=0 fig = plt.figure(figsize=(8,4)) plt.plot(x,w[:,0],'o:',label='Initial Condition') plt.plot(x[[0,N]],w[[0,N],0],'go',label='Boundary Condition t[0]=0') #plt.plot(x[N],w[N,0],'go') plt.xlim([-0.1,1.1]) plt.ylim([-0.1,1.1]) plt.title('Intitial and Boundary Condition',fontsize=24) plt.xlabel('x') plt.ylabel('w') plt.legend(loc='best') plt.show() ``` ## The Explicit Forward Time Centered Space (FTCS) Difference Equation The explicit Forwards Time Centered Space (FTCS) difference equation of the Heat Equation is derived by discretising \begin{equation} \frac{\partial u_{ij}}{\partial t} = \frac{\partial^2 u_{ij}}{\partial x^2},\end{equation} around $(x_i,t_{j})$ giving the difference equation \begin{equation} \frac{w_{ij+1}-w_{ij}}{k}=\frac{w_{i+1j}-2w_{ij}+w_{i-1j}}{h^2}, \end{equation} rearranging the equation we get \begin{equation} w_{ij+1}=rw_{i-1j}+(1-2r)w_{ij}+rw_{i+1j}, \end{equation} for $i=1,...9$ where $r=\frac{k}{h^2}$. This gives the formula for the unknown term $w_{ij+1}$ at the $(ij+1)$ mesh points in terms of $x[i]$ along the jth time row. Hence we can calculate the unknown pivotal values of $w$ along the first row of $j=1$ in terms of the known boundary conditions. This can be written in matrix form \begin{equation}\mathbf{w}_{j+1}=A\mathbf{w}_{j} +\mathbf{b}_{j} \end{equation} for which $A$ is a $9\times9$ matrix: \begin{equation} \left(\begin{array}{c} w_{1j+1}\\ w_{2j+1}\\ w_{3j+1}\\ w_{4j+1}\\ w_{5j+1}\\ w_{6j+1}\\ w_{7j+1}\\ w_{8j+1}\\ w_{9j+1}\\ \end{array}\right). =\left(\begin{array}{cccc cccc} 1-2r&r& 0&0&0 &0&0&0\\ r&1-2r&r&0&0&0 &0&0&0\\ 0&r&1-2r &r&0&0& 0&0&0\\ 0&0&r&1-2r &r&0&0& 0&0\\ 0&0&0&r&1-2r &r&0&0& 0\\ 0&0&0&0&r&1-2r &r&0&0\\ 0&0&0&0&0&r&1-2r &r&0\\ 0&0&0&0&0&0&r&1-2r&r\\ 0&0&0&0&0&0&0&r&1-2r\\ \end{array}\right) \left(\begin{array}{c} w_{1j}\\ w_{2j}\\ w_{3j}\\ w_{4j}\\ w_{5j}\\ w_{6j}\\ w_{7j}\\ w_{8j}\\ w_{9j}\\ \end{array}\right)+ \left(\begin{array}{c} rw_{0j}\\ 0\\ 0\\ 0\\ 0\\ 0\\ 0\\ 0\\ rw_{10j}\\ \end{array}\right). \end{equation} It is assumed that the boundary values $w_{0j}$ and $w_{10j}$ are known for $j=1,2,...$, and $w_{i0}$ for $i=0,...,10$ is the initial condition. The Figure below shows the values of the $9\times 9$ matrix in colour plot form for $r=\frac{k}{h^2}$. ``` A=np.zeros((N-1,N-1)) for i in range (0,N-1): A[i,i]=1-2*r # DIAGONAL for i in range (0,N-2): A[i+1,i]=r # UPPER DIAGONAL A[i,i+1]=r # LOWER DIAGONAL fig = plt.figure(figsize=(6,4)); #plt.matshow(A); plt.imshow(A,interpolation='none'); plt.xticks(np.arange(N-1), np.arange(1,N-0.9,1)); plt.yticks(np.arange(N-1), np.arange(1,N-0.9,1)); clb=plt.colorbar(); clb.set_label('Matrix elements values'); #clb.set_clim((-1,1)); plt.title('Matrix r=%s'%(np.round(r,3)),fontsize=24) fig.tight_layout() plt.show(); ``` ## Results To numerically approximate the solution at $t[1]$ the matrix equation becomes \begin{equation} \mathbf{w}_{1}=A\mathbf{w}_{0} +\mathbf{b}_{0} \end{equation} where all the right hand side is known. To approximate solution at time $t[2]$ we use the matrix equation \begin{equation} \mathbf{w}_{2}=A\mathbf{w}_{1} +\mathbf{b}_{1}. 
\end{equation} Each set of numerical solutions $w[i,j]$ for all $i$ at the previous time step is used to approximate the solution $w[i,j+1]$. The Figure below shows the numerical approximation $w[i,j]$ of the Heat Equation using the FTCS method at $x[i]$ for $i=0,...,10$ and time steps $t[j]$ for $j=1,...,15$. The left plot shows the numerical approximation $w[i,j]$ as a function of $x[i]$ with each color representing the different time steps $t[j]$. The right plot shows the numerical approximation $w[i,j]$ as colour plot as a function of $x[i]$, on the $x[i]$ axis and time $t[j]$ on the $y$ axis. For $r>\frac{1}{2}$ the method is unstable resulting a solution that oscillates unnaturally between positive and negative values for each time step. ``` fig = plt.figure(figsize=(12,6)) plt.subplot(121) for j in range (1,time_steps+1): b[0]=r*w[0,j-1] b[N-2]=r*w[N,j-1] w[1:(N),j]=np.dot(A,w[1:(N),j-1]) plt.plot(x,w[:,j],'o:',label='t[%s]=%s'%(j,np.round(time[j],4))) plt.xlabel('x') plt.ylabel('w') #plt.legend(loc='bottom', bbox_to_anchor=(0.5, -0.1)) plt.legend(bbox_to_anchor=(-.4, 1), loc=2, borderaxespad=0.) plt.subplot(122) plt.imshow(w.transpose()) plt.xticks(np.arange(len(x)), x) plt.yticks(np.arange(len(time)), np.round(time,4)) plt.xlabel('x') plt.ylabel('time') clb=plt.colorbar() clb.set_label('Temperature (w)') plt.suptitle('Numerical Solution of the Heat Equation r=%s'%(np.round(r,3)),fontsize=24,y=1.08) fig.tight_layout() plt.show() ``` ## Local Trunction Error The local truncation error of the classical explicit difference approach to \begin{equation} \frac{\partial U}{\partial t} - \frac{\partial^2 U}{\partial x^2}=0, \end{equation} with \begin{equation} F_{ij}(w)=\frac{w_{ij+1}-w_{ij}}{k}-\frac{w_{i+1j}-2w_{ij}+w_{i-1j}}{h^2}=0, \end{equation} is \begin{equation} T_{ij}=F_{ij}(U)=\frac{U_{ij+1}-U_{ij}}{k}-\frac{U_{i+1j}-2U_{ij}+U_{i-1j}}{h^2}, \end{equation} By Taylors expansions we have \begin{eqnarray*} U_{i+1j}&=&U((i+1)h,jk)=U(x_i+h,t_j)\\ &=&U_{ij}+h\left(\frac{\partial U}{\partial x} \right)_{ij}+\frac{h^2}{2}\left(\frac{\partial^2 U}{\partial x^2} \right)_{ij}+\frac{h^3}{6}\left(\frac{\partial^3 U}{\partial x^3} \right)_{ij} +...\\ U_{i-1j}&=&U((i-1)h,jk)=U(x_i-h,t_j)\\ &=&U_{ij}-h\left(\frac{\partial U}{\partial x} \right)_{ij}+\frac{h^2}{2}\left(\frac{\partial^2 U}{\partial x^2} \right)_{ij}-\frac{h^3}{6}\left(\frac{\partial^3 U}{\partial x^3} \right)_{ij} +...\\ U_{ij+1}&=&U(ih,(j+1)k)=U(x_i,t_j+k)\\ &=&U_{ij}+k\left(\frac{\partial U}{\partial t} \right)_{ij}+\frac{k^2}{2}\left(\frac{\partial^2 U}{\partial t^2} \right)_{ij}+\frac{k^3}{6}\left(\frac{\partial^3 U}{\partial t^3} \right)_{ij} +... \end{eqnarray*} substitution into the expression for $T_{ij}$ then gives \begin{eqnarray*} T_{ij}&=&\left(\frac{\partial U}{\partial t} - \frac{\partial^2 U}{\partial x^2} \right)_{ij}+\frac{k}{2}\left(\frac{\partial^2 U}{\partial t^2} \right)_{ij} -\frac{h^2}{12}\left(\frac{\partial^4 U}{\partial x^4} \right)_{ij}\\ & & +\frac{k^2}{6}\left(\frac{\partial^3 U}{\partial t^3} \right)_{ij} -\frac{h^4}{360}\left(\frac{\partial^6 U}{\partial x^6} \right)_{ij}+ ... \end{eqnarray*} But $U$ is the solution to the differential equation so \begin{equation} \left(\frac{\partial U}{\partial t} - \frac{\partial^2 U}{\partial x^2} \right)_{ij}=0,\end{equation} the principal part of the local truncation error is \begin{equation} \frac{k}{2}\left(\frac{\partial^2 U}{\partial t^2} \right)_{ij}-\frac{h^2}{12}\left(\frac{\partial^4 U}{\partial x^4} \right)_{ij}. 
\end{equation} Hence the truncation error is \begin{equation} T_{ij}=O(k)+O(h^2). \end{equation} ## Stability Analysis To investigating the stability of the fully explicit FTCS difference method of the Heat Equation, we will use the von Neumann method. The FTCS difference equation is: \begin{equation}\frac{1}{k}(w_{pq+1}-w_{pq})=\frac{1}{h_x^2}(w_{p-1q}-2w_{pq}+w_{p+1q}),\end{equation} approximating \begin{equation}\frac{\partial U}{\partial t}=\frac{\partial^2 U}{\partial x^2}\end{equation} at $(ph,qk)$. Substituting $w_{pq}=e^{i\beta x}\xi^{q}$ into the difference equation gives: \begin{equation}e^{i\beta ph}\xi^{q+1}-e^{i\beta ph}\xi^{q}=r\{e^{i\beta (p-1)h}\xi^{q}-2e^{i\beta ph}\xi^{q}+e^{i\beta (p+1)h}\xi^{q} \} \end{equation} where $r=\frac{k}{h_x^2}$. Divide across by $e^{i\beta (p)h}\xi^{q}$ leads to \begin{equation} \xi-1=r(e^{i\beta (-1)h} -2+e^{i\beta h}),\end{equation} \begin{equation}\xi= 1+r (2\cos(\beta h)-2),\end{equation} \begin{equation}\xi=1-4r(\sin^2(\beta\frac{h}{2})).\end{equation} Hence \begin{equation}\left| 1-4r(\sin^2(\beta\frac{h}{2}) )\right|\leq 1\end{equation} for this to hold \begin{equation} 4r(\sin^2(\beta\frac{h}{2}) )\leq 2 \end{equation} which means \begin{equation} r\leq \frac{1}{2}.\end{equation} therefore the equation is conditionally stable as $0 < \xi \leq 1$ for $r<\frac{1}{2}$ and all $\beta$ . ## References [1] G D Smith Numerical Solution of Partial Differential Equations: Finite Difference Method Oxford 1992 [2] Butler, J. (2019). John S Butler Numerical Methods for Differential Equations. [online] Maths.dit.ie. Available at: http://www.maths.dit.ie/~johnbutler/Teaching_NumericalMethods.html [Accessed 14 Mar. 2019]. [3] Wikipedia contributors. (2019, February 22). Heat equation. In Wikipedia, The Free Encyclopedia. Available at: https://en.wikipedia.org/w/index.php?title=Heat_equation&oldid=884580138 [Accessed 14 Mar. 2019]. ``` ```
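To close, a quick numerical check (not in the original notebook) of the stability bound derived above: for the grid used here, $r=k/h^2=0.1\leq\frac{1}{2}$, so the amplification factor $\xi=1-4r\sin^2(\beta\frac{h}{2})$ stays within $[-1,1]$ for every Fourier mode, whereas a value such as $r=0.6$ does not.

```
# Evaluate the von Neumann amplification factor for a stable and an unstable r
import numpy as np

h, k = 1/10, 1/1000
for r in (k / h**2, 0.6):                       # r = 0.1 (stable) vs r = 0.6 (violates r <= 1/2)
    beta = np.linspace(0, np.pi / h, 1000)      # a sweep of Fourier modes
    xi = 1 - 4 * r * np.sin(beta * h / 2) ** 2
    status = "stable" if np.abs(xi).max() <= 1 else "unstable"
    print("r = %.2f, max |xi| = %.3f (%s)" % (r, np.abs(xi).max(), status))
```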
## DS/CMPSC 410 MiniProject #3 ### Spring 2021 ### Instructor: John Yen ### TA: Rupesh Prajapati and Dongkuan Xu ### Learning Objectives - Be able to apply thermometer encoding to encode numerical variables into binary variable format. - Be able to apply k-means clustering to the Darknet dataset based on both thermometer encoding and one-hot encoding. - Be able to use external labels (e.g., mirai, zmap, and masscan) to evaluate the result of k-means clustering. - Be able to investigate characteristics of a cluster using one-hot encoded feature. ### Total points: 100 - Exercise 1: 5 points - Exercise 2: 5 points - Exercise 3: 5 points - Exercise 4: 15 points - Exercise 5: 5 points - Exercise 6: 10 points - Exercise 7: 5 points - Exercise 8: 5 points - Exercise 9: 10 points - Exercise 10: 5 points - Exercise 11: 10 points - Exercise 12: 20 points ### Due: 5 pm, April 23, 2021 ``` import pyspark import csv from pyspark import SparkContext from pyspark.sql import SparkSession from pyspark.sql.types import StructField, StructType, StringType, LongType from pyspark.sql.functions import col, column from pyspark.sql.functions import expr from pyspark.sql.functions import split from pyspark.sql.functions import array_contains from pyspark.sql import Row from pyspark.ml import Pipeline from pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorAssembler, IndexToString, PCA from pyspark.ml.clustering import KMeans from pyspark.ml.evaluation import ClusteringEvaluator import pandas as pd import numpy as np import math ss = SparkSession.builder.master("local").appName("ClusteringTE").getOrCreate() ``` ## Exercise 1 (5 points) Complete the path for input file in the code below and enter your name in this Markdown cell: - Name: Kangdong Yuan ``` Scanners_df = ss.read.csv("/storage/home/kky5082/ds410/Lab10/sampled_profile.csv", header= True, inferSchema=True ) ``` ## We can use printSchema() to display the schema of the DataFrame Scanners_df to see whether it was inferred correctly. ``` Scanners_df.printSchema() Scanners_df.where(col('mirai')).count() ``` # Part A: One Hot Encoding ## This part is identical to that of Miniproject Deliverable #2 We want to apply one hot encoding to the set of ports scanned by scanners. - A.1 Like Mini Project deliverable 1 and 2, we first convert the feature "ports_scanned_str" to a feature that is an Array of ports - A.2 We then calculate the total number of scanners for each port - A.3 We identify the top n port to use for one-hot encoding (You choose the number n). - A.4 Generate one-hot encoded feature for these top n ports. ``` # Scanners_df.select("ports_scanned_str").show(30) Scanners_df2=Scanners_df.withColumn("Ports_Array", split(col("ports_scanned_str"), "-") ) # Scanners_df2.persist().show(10) ``` ## A.1 We only need the column ```Ports_Array``` to calculate the top ports being scanned ``` Ports_Scanned_RDD = Scanners_df2.select("Ports_Array").rdd # Ports_Scanned_RDD.persist().take(5) ``` ## Because each port number in the Ports_Array column for each row occurs only once, we can count the total occurance of each port number through flatMap. 
``` Ports_list_RDD = Ports_Scanned_RDD.map(lambda row: row[0] ) # Ports_list_RDD.persist() Ports_list2_RDD = Ports_Scanned_RDD.flatMap(lambda row: row[0] ) Port_count_RDD = Ports_list2_RDD.map(lambda x: (x, 1)) # Port_count_RDD.take(2) Port_count_total_RDD = Port_count_RDD.reduceByKey(lambda x,y: x+y, 1) # Port_count_total_RDD.persist().take(5) Sorted_Count_Port_RDD = Port_count_total_RDD.map(lambda x: (x[1], x[0])).sortByKey( ascending = False) # Sorted_Count_Port_RDD.persist().take(50) ``` ## Exercise 2 (5%) Select top_ports to be the number of top ports you want to use for one-hot encoding. I recommend a number between 20 and 40. ``` top_ports=30 Sorted_Ports_RDD= Sorted_Count_Port_RDD.map(lambda x: x[1]) Top_Ports_list = Sorted_Ports_RDD.take(top_ports) # Top_Ports_list # Scanners_df3=Scanners_df2.withColumn(FeatureName, array_contains("Ports_Array", Top_Ports_list[0])) # Scanners_df3.show(10) ``` ## A.4 Generate Hot-One Encoded Feature for each of the top ports in the Top_Ports_list - Iterate through the Top_Ports_list so that each top port is one-hot encoded. ## Exercise 3 (5 %) Complete the following PySpark code for encoding the n ports using One Hot Encoding, where n is specified by the variable ```top_ports``` ``` for i in range(0, top_ports - 1): # "Port" + Top_Ports_list[i] is the name of each new feature created through One Hot Encoding Scanners_df3 = Scanners_df2.withColumn("Port" + Top_Ports_list[i], array_contains("Ports_Array", Top_Ports_list[i])) Scanners_df2 = Scanners_df3 Scanners_df2.printSchema() ``` # Part B Thermometer Encoding of Numerical Variables ## We encode the numerical variable numports (number of ports being scanned) using thermometer encoding ``` pow(2,15) Scanners_df3=Scanners_df2.withColumn("TE_numports_0", col("numports") > 0) Scanners_df2 = Scanners_df3 Scanners_df3.count() Scanners_df3.where(col('TE_numports_0')).count() ``` # Exercise 4 (15%) Complete the following pyspark code to use the column "numports" to create 16 additional columns as follows: - TE_numports_0 : True, if the scanner scans more than 0 ports, otherwise False. - TE_numports_1 : True, if the scanner scans more than 2**0 (1) port, otherwise False. - TE_numports_2 : True, if the scanner scans more than 2**1 (2) ports, otherwise False. - TE_numports_3 : True, if the scanner scans more than 2**2 (4) ports, otherwise False ... - TE_numports_15 : True, if the scanner scans more than 2**14 ports, otherwise False - TE_numports_16 : True, if the scanner scans more than 2**15 (32768) ports, otherwise False ``` for i in range(0, 16): # "TE_numports_" + str(i+1) is the name of each new feature created for each Bin in Thermometer Encoding Scanners_df3 = Scanners_df2.withColumn("TE_numports_" + str(i+1), col("numports") > pow(2,i)) Scanners_df2 = Scanners_df3 Scanners_df2.printSchema() ``` # Exercise 5 (5 points) What is the total number of scanners that scan more than 2^15 (i.e., 32768) ports? Complete the code below using Scanners_df2 to find out the answer. ``` HFScanners_df2 = Scanners_df2.where(col('TE_numports_15')) HFScanners_df2.count() ``` # Exercise 6 (10 points) Complete the following code to use k-means to cluster the scanners using the following - thermometer encoding of 'numports' numerical feature - one-hot encoding of top k ports (k chosen by you in Exercise 2). 
## Specify Parameters for k Means Clustering ``` km = KMeans(featuresCol="features", predictionCol="prediction").setK(50).setSeed(123) km.explainParams() input_features = [] for i in range(0, top_ports - 1): input_features.append( "Port"+Top_Ports_list[i] ) for i in range(0, 15): input_features.append( "TE_numports_" + str(i)) print(input_features) va = VectorAssembler().setInputCols(input_features).setOutputCol("features") data= va.transform(Scanners_df2) data.persist() kmModel=km.fit(data) kmModel predictions = kmModel.transform(data) predictions.persist() Cluster1_df=predictions.where(col("prediction")==0) Cluster1_df.persist().count() ``` ## Exercise 7 (5 points) Complete the following code to find the size of all of the clusters generated. ``` summary = kmModel.summary summary.clusterSizes ``` # Exercise 8 (5 points) Complete the following code to find the Silhouette Score of the clustering result. ``` evaluator = ClusteringEvaluator() silhouette = evaluator.evaluate(predictions) print('Silhouette Score of the Clustering Result is ', silhouette) centers = kmModel.clusterCenters() centers[0] print("Cluster Centers:") i=0 for center in centers: print("Cluster ", str(i+1), center) i = i+1 ``` # Part C Percentage of Mirai Malwares in Each Cluster # Exercise 9 (10 points) Complete the following code to compute the percentage of Mirai Malwares, Zmap, and Masscan in each cluster. ``` cluster_eval_df = pd.DataFrame( columns = ['cluster ID', 'size', 'cluster center', 'mirai_ratio', 'zmap_ratio', 'masscan_ratio'] ) for i in range(0, 50): cluster_i = predictions.where(col('prediction')==i) cluster_i_size = cluster_i.count() cluster_i_mirai_count = cluster_i.where(col('mirai')).count() cluster_i_mirai_ratio = cluster_i_mirai_count/cluster_i_size if cluster_i_mirai_count > 0: print("Cluster ", i, "; Mirai Ratio:", cluster_i_mirai_ratio, "; Cluster Size: ", cluster_i_size) cluster_i_zmap_ratio = (cluster_i.where(col('zmap')).count())/cluster_i_size cluster_i_masscan_ratio = (cluster_i.where(col('masscan')).count())/cluster_i_size cluster_eval_df.loc[i]=[i, cluster_i_size, centers[i], cluster_i_mirai_ratio, cluster_i_zmap_ratio, cluster_i_masscan_ratio ] ``` # Exercise 10 (5 points) Identify all of the clusters that have a large percentage of Mirai malware. For example, you can choose clusters with at least 80% of Mirai ratio. If you use a different threshold (other than 80%), describe the threshold you used and the rational of your choice. ## Answer to Exercise 10: ## if I choose 80% as threshold - Cluster 5 ; Mirai Ratio: 0.8424333084018948 ; Cluster Size: 16044 - Cluster 37 ; Mirai Ratio: 0.8878737541528239 ; Cluster Size: 1204 ... ``` # You can filter predictions DataFrame (Spark) to get all scanners in a cluster. # For example, the code below selects scanners in cluster 5. However, you should # replace 5 with the ID of the cluster you want to investigate. cluster_selected = predictions.where((col('prediction')==5) | (col('prediction')==37)) # If you prefer to use Pandas dataframe, you can use the following to convert a cluster to a Pandas dataframe cluster_selected_df = cluster_selected.select("*").toPandas() cluster_selected.printSchema() ``` # Exercise 11 (10 points) Complete the following code to find out, for each of the clusters you identified in Exercise 10, - (1) (5 points) determine whether they scan a common port, and - (2) (5 points) what is the port number if most of them in a cluster scan a common port. 
You can use the code below to find out which of the top ports are scanned by the scanners in a cluster.

```
# You fill in the ??? based on the cluster you want to investigate.
cluster_5 = predictions.where(col('prediction')==5)
cluster_37 = predictions.where(col('prediction')==37)

for i in range(0, top_ports - 1):
    port_num = "Port" + Top_Ports_list[i]
    port_i_count = cluster_5.where(col(port_num)).count()
    if port_i_count > 0:
        print("Scanners of Port ", Top_Ports_list[i], " = ", port_i_count)

for i in range(0, top_ports - 1):
    port_num = "Port" + Top_Ports_list[i]
    port_i_count = cluster_37.where(col(port_num)).count()
    if port_i_count > 0:
        print("Scanners of Port ", Top_Ports_list[i], " = ", port_i_count)
```

# Answer to Exercise 11
- (1) (5 points) Yes, the scanners in each cluster share a common port: every scanner in cluster 5 scans port 23, and the scanners in cluster 37 scan port 23 as well, so port 23 is a common port across both clusters.
- (2) (5 points) The most frequent port in cluster 5 is port 23, scanned by 16044 scanners (the whole cluster); the most frequent port in cluster 37 is port 2323, scanned by 1204 scanners (also the whole cluster).

# Exercise 12 (20 points)
Based on the results above and those of mini project deliverable #2, answer the following questions:
- (a) Why is the clustering result of mini project #3 better than that of #2? (5 points)
- (b) Based on your answer to (a), what is the general lesson you learned for solving clustering problems? (5 points)
- (c) Did you find anything interesting and/or surprising using Mirai labels to evaluate the clustering result? (5 points)
- (d) Based on your answer to (c), what is the general lesson you learned regarding evaluating clustering? (5 points)

# Answer to Exercise 12:
- (a) Mini project #3 uses thermometer encoding, whereas mini project #2 mixed unnormalized numerical variables with one-hot encoded categorical variables; thermometer encoding removes that mixture. In my actual run, however, project #3 obtained a worse silhouette score than project #2, so hyperparameter tuning of k-means could improve the result further.
- (b) To improve clustering performance, I should avoid mixing raw numerical variables with one-hot encoded variables, avoid unnormalized numerical variables, and avoid an overly high-dimensional feature space. Thermometer encoding is one way to address this for k-means.
- (c) The Mirai labels give a convenient external way to validate the clustering: they provide a concrete per-cluster ratio that shows how well the clusters line up with known malware.
- (d) External labels are useful when we propose a new clustering technique and want to validate it or compare it against existing techniques: given datasets with known ground truth, we check whether the clustering produces solutions similar to that ground truth. External validity measures can therefore be used to assess and improve cluster validation.
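The answer to Exercise 12(a) mentions hyperparameter tuning as a way to improve the result. A minimal version of that idea is to sweep the number of clusters and compare silhouette scores; the sketch below is an illustration rather than part of the assignment, it reuses the assembled `data` DataFrame and the imports from the cells above, and the list of k values is arbitrary.

```
# Sweep k and compare silhouette scores to pick a better number of clusters
for k in [10, 25, 50, 75, 100]:
    km_k = KMeans(featuresCol="features", predictionCol="prediction").setK(k).setSeed(123)
    predictions_k = km_k.fit(data).transform(data)
    silhouette_k = ClusteringEvaluator().evaluate(predictions_k)
    print("k =", k, " silhouette =", silhouette_k)
```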
<a href="https://cognitiveclass.ai/"> <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/CCLog.png" width="200" align="center"> </a> <h1>Dictionaries in Python</h1> <p><strong>Welcome!</strong> This notebook will teach you about the dictionaries in the Python Programming Language. By the end of this lab, you'll know the basics dictionary operations in Python, including what it is, and the operations on it.</p> <div class="alert alert-block alert-info" style="margin-top: 20px"> <a href="http://cocl.us/topNotebooksPython101Coursera"> <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/TopAd.png" width="750" align="center"> </a> </div> <h2>Table of Contents</h2> <div class="alert alert-block alert-info" style="margin-top: 20px"> <ul> <li> <a href="#dic">Dictionaries</a> <ul> <li><a href="content">What are Dictionaries?</a></li> <li><a href="key">Keys</a></li> </ul> </li> <li> <a href="#quiz">Quiz on Dictionaries</a> </li> </ul> <p> Estimated time needed: <strong>20 min</strong> </p> </div> <hr> <h2 id="Dic">Dictionaries</h2> <h3 id="content">What are Dictionaries?</h3> A dictionary consists of keys and values. It is helpful to compare a dictionary to a list. Instead of the numerical indexes such as a list, dictionaries have keys. These keys are the keys that are used to access values within a dictionary. <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/DictsList.png" width="650" /> An example of a Dictionary <code>Dict</code>: ``` # Create the dictionary Dict = {"key1": 1, "key2": "2", "key3": [3, 3, 3], "key4": (4, 4, 4), ('key5'): 5, (0, 1): 6} Dict ``` The keys can be strings: ``` # Access to the value by the key Dict["key1"] ``` Keys can also be any immutable object such as a tuple: ``` # Access to the value by the key Dict[(0, 1)] ``` Each key is separated from its value by a colon "<code>:</code>". Commas separate the items, and the whole dictionary is enclosed in curly braces. An empty dictionary without any items is written with just two curly braces, like this "<code>{}</code>". ``` # Create a sample dictionary release_year_dict = {"Thriller": "1982", "Back in Black": "1980", \ "The Dark Side of the Moon": "1973", "The Bodyguard": "1992", \ "Bat Out of Hell": "1977", "Their Greatest Hits (1971-1975)": "1976", \ "Saturday Night Fever": "1977", "Rumours": "1977"} release_year_dict ``` In summary, like a list, a dictionary holds a sequence of elements. Each element is represented by a key and its corresponding value. Dictionaries are created with two curly braces containing keys and values separated by a colon. For every key, there can only be one single value, however, multiple keys can hold the same value. Keys can only be strings, numbers, or tuples, but values can be any data type. It is helpful to visualize the dictionary as a table, as in the following image. The first column represents the keys, the second column represents the values. 
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/DictsStructure.png" width="650" />

<h3 id="key">Keys</h3>

You can retrieve the values based on the names:

```
# Get value by key
release_year_dict['Thriller']
```

This corresponds to:

<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/DictsKeyOne.png" width="500" />

Similarly for <b>The Bodyguard</b>:

```
# Get value by key
release_year_dict['The Bodyguard']
```

<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/DictsKeyTwo.png" width="500" />

Now you can retrieve the keys of the dictionary using the method <code>keys()</code>:

```
# Get all the keys in the dictionary
release_year_dict.keys()
```

You can retrieve the values using the method <code>values()</code>:

```
# Get all the values in the dictionary
release_year_dict.values()
```

We can add an entry:

```
# Add a value with a new key to the dictionary
release_year_dict['Graduation'] = '2007'
release_year_dict
```

We can delete an entry:

```
# Delete entries by key
del(release_year_dict['Thriller'])
del(release_year_dict['Graduation'])
release_year_dict
```

We can verify if an element is in the dictionary:

```
# Verify the key is in the dictionary
'The Bodyguard' in release_year_dict
```

<hr>

<h2 id="quiz">Quiz on Dictionaries</h2>

<b>You will need this dictionary for the next two questions:</b>

```
# Question sample dictionary
soundtrack_dic = {"The Bodyguard":"1992", "Saturday Night Fever":"1977"}
soundtrack_dic
```

a) In the dictionary <code>soundtrack_dic</code>, what are the keys?

```
# Write your code below and press Shift+Enter to execute
soundtrack_dic.keys()
```

Double-click __here__ for the solution.

<!-- Your answer is below:
soundtrack_dic.keys() # The keys are "The Bodyguard" and "Saturday Night Fever"
-->

b) In the dictionary <code>soundtrack_dic</code>, what are the values?

```
# Write your code below and press Shift+Enter to execute
soundtrack_dic.values()
```

Double-click __here__ for the solution.

<!-- Your answer is below:
soundtrack_dic.values() # The values are "1992" and "1977"
-->
<!-- Your answer is below: album_sales_dict.keys() --> d) Find the names of the recording sales from the dictionary using the method <code>values</code>: ``` # Write your code below and press Shift+Enter to execute album_sales_dict.values() ``` Double-click __here__ for the solution. <!-- Your answer is below: album_sales_dict.values() --> <hr> <h2>The last exercise!</h2> <p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href="https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/" target="_blank">this article</a> to learn how to share your work. <hr> <div class="alert alert-block alert-info" style="margin-top: 20px"> <h2>Get IBM Watson Studio free of charge!</h2> <p><a href="https://cocl.us/bottemNotebooksPython101Coursera"><img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/BottomAd.png" width="750" align="center"></a></p> </div> <h3>About the Authors:</h3> <p><a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank">Joseph Santarcangelo</a> is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.</p> Other contributors: <a href="www.linkedin.com/in/jiahui-mavis-zhou-a4537814a">Mavis Zhou</a> <hr> <p>Copyright &copy; 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href="https://cognitiveclass.ai/mit-license/">MIT License</a>.</p>
# Stochastic Variational GP Regression ## Overview In this notebook, we'll give an overview of how to use SVGP stochastic variational regression ((https://arxiv.org/pdf/1411.2005.pdf)) to rapidly train using minibatches on the `3droad` UCI dataset with hundreds of thousands of training examples. This is one of the more common use-cases of variational inference for GPs. If you are unfamiliar with variational inference, we recommend the following resources: - [Variational Inference: A Review for Statisticians](https://arxiv.org/abs/1601.00670) by David M. Blei, Alp Kucukelbir, Jon D. McAuliffe. - [Scalable Variational Gaussian Process Classification](https://arxiv.org/abs/1411.2005) by James Hensman, Alex Matthews, Zoubin Ghahramani. ``` import tqdm import math import torch import gpytorch from matplotlib import pyplot as plt # Make plots inline %matplotlib inline ``` For this example notebook, we'll be using the `song` UCI dataset used in the paper. Running the next cell downloads a copy of the dataset that has already been scaled and normalized appropriately. For this notebook, we'll simply be splitting the data using the first 80% of the data as training and the last 20% as testing. **Note**: Running the next cell will attempt to download a **~136 MB** file to the current directory. ``` import urllib.request import os from scipy.io import loadmat from math import floor # this is for running the notebook in our testing framework smoke_test = ('CI' in os.environ) if not smoke_test and not os.path.isfile('../elevators.mat'): print('Downloading \'elevators\' UCI dataset...') urllib.request.urlretrieve('https://drive.google.com/uc?export=download&id=1jhWL3YUHvXIaftia4qeAyDwVxo6j1alk', '../elevators.mat') if smoke_test: # this is for running the notebook in our testing framework X, y = torch.randn(1000, 3), torch.randn(1000) else: data = torch.Tensor(loadmat('../elevators.mat')['data']) X = data[:, :-1] X = X - X.min(0)[0] X = 2 * (X / X.max(0)[0]) - 1 y = data[:, -1] train_n = int(floor(0.8 * len(X))) train_x = X[:train_n, :].contiguous() train_y = y[:train_n].contiguous() test_x = X[train_n:, :].contiguous() test_y = y[train_n:].contiguous() if torch.cuda.is_available(): train_x, train_y, test_x, test_y = train_x.cuda(), train_y.cuda(), test_x.cuda(), test_y.cuda() ``` ## Creating a DataLoader The next step is to create a torch `DataLoader` that will handle getting us random minibatches of data. This involves using the standard `TensorDataset` and `DataLoader` modules provided by PyTorch. In this notebook we'll be using a fairly large batch size of 1024 just to make optimization run faster, but you could of course change this as you so choose. ``` from torch.utils.data import TensorDataset, DataLoader train_dataset = TensorDataset(train_x, train_y) train_loader = DataLoader(train_dataset, batch_size=1024, shuffle=True) test_dataset = TensorDataset(test_x, test_y) test_loader = DataLoader(test_dataset, batch_size=1024, shuffle=False) ``` ## Creating a SVGP Model For most variational/approximate GP models, you will need to construct the following GPyTorch objects: 1. A **GP Model** (`gpytorch.models.ApproximateGP`) - This handles basic variational inference. 1. A **Variational distribution** (`gpytorch.variational._VariationalDistribution`) - This tells us what form the variational distribution q(u) should take. 1. 
A **Variational strategy** (`gpytorch.variational._VariationalStrategy`) - This tells us how to transform a distribution q(u) over the inducing point values to a distribution q(f) over the latent function values for some input x. Here, we use a `VariationalStrategy` with `learn_inducing_locations=True`, and a `CholeskyVariationalDistribution`. These are the most straightforward and common options. #### The GP Model The `ApproximateGP` model is GPyTorch's simplest approximate inference model. It approximates the true posterior with a distribution specified by a `VariationalDistribution`, which is most commonly some form of MultivariateNormal distribution. The model defines all the variational parameters that are needed, and keeps all of this information under the hood. The components of a user built `ApproximateGP` model in GPyTorch are: 1. An `__init__` method that constructs a mean module, a kernel module, a variational distribution object and a variational strategy object. This method should also be responsible for constructing whatever other modules might be necessary. 2. A `forward` method that takes in some $n \times d$ data `x` and returns a MultivariateNormal with the *prior* mean and covariance evaluated at `x`. In other words, we return the vector $\mu(x)$ and the $n \times n$ matrix $K_{xx}$ representing the prior mean and covariance matrix of the GP. ``` from gpytorch.models import ApproximateGP from gpytorch.variational import CholeskyVariationalDistribution from gpytorch.variational import VariationalStrategy class GPModel(ApproximateGP): def __init__(self, inducing_points): variational_distribution = CholeskyVariationalDistribution(inducing_points.size(0)) variational_strategy = VariationalStrategy(self, inducing_points, variational_distribution, learn_inducing_locations=True) super(GPModel, self).__init__(variational_strategy) self.mean_module = gpytorch.means.ConstantMean() self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel()) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return gpytorch.distributions.MultivariateNormal(mean_x, covar_x) inducing_points = train_x[:500, :] model = GPModel(inducing_points=inducing_points) likelihood = gpytorch.likelihoods.GaussianLikelihood() if torch.cuda.is_available(): model = model.cuda() likelihood = likelihood.cuda() ``` ### Training the Model The cell below trains the model above, learning the hyperparameters of the Gaussian process and likelihood **and** the variational parameters by maximizing the variational ELBO. Unlike when using the exact GP marginal log likelihood, performing variational inference allows us to make use of stochastic optimization techniques. For this example, we'll train for a handful of epochs (four, or one when running in the smoke-test framework), which should be enough to get a reasonable fit on this dataset. The optimization loop differs from the one seen in our simpler tutorials in that it involves looping over both a number of training iterations (epochs) *and* minibatches of the data. However, the basic process is the same: for each minibatch, we forward through the model, compute the loss (the `VariationalELBO` or ELBO), call backwards, and do a step of optimization. ``` num_epochs = 1 if smoke_test else 4 model.train() likelihood.train() optimizer = torch.optim.Adam([ {'params': model.parameters()}, {'params': likelihood.parameters()}, ], lr=0.01) # Our loss object.
We're using the VariationalELBO mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=train_y.size(0)) epochs_iter = tqdm.notebook.tqdm(range(num_epochs), desc="Epoch") for i in epochs_iter: # Within each iteration, we will go over each minibatch of data minibatch_iter = tqdm.notebook.tqdm(train_loader, desc="Minibatch", leave=False) for x_batch, y_batch in minibatch_iter: optimizer.zero_grad() output = model(x_batch) loss = -mll(output, y_batch) minibatch_iter.set_postfix(loss=loss.item()) loss.backward() optimizer.step() ``` ### Making Predictions The next cell computes the model's predictive distribution on the test set and collects the predictive means (`preds.mean`); the predictive covariance is available from the same object. Because the test set is substantially smaller than the training set, we don't need to make predictions in minibatches here, although this can be done by passing in minibatches of `test_x` rather than the full tensor. ``` model.eval() likelihood.eval() means = torch.tensor([0.]) with torch.no_grad(): for x_batch, y_batch in test_loader: preds = model(x_batch) means = torch.cat([means, preds.mean.cpu()]) means = means[1:] print('Test MAE: {}'.format(torch.mean(torch.abs(means - test_y.cpu())))) ```
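The MAE above is computed from the predictive means only. As a small extension (my own sketch, not part of the original notebook), one can collect the predictive variances in the same minibatch loop and report an RMSE as well; `likelihood(model(x))` gives the predictive distribution including observation noise:

```
model.eval()
likelihood.eval()
means, variances = [], []
with torch.no_grad():
    for x_batch, y_batch in test_loader:
        preds = likelihood(model(x_batch))   # predictive distribution with observation noise
        means.append(preds.mean.cpu())
        variances.append(preds.variance.cpu())
means = torch.cat(means)
variances = torch.cat(variances)
rmse = torch.sqrt(torch.mean((means - test_y.cpu()) ** 2))
print('Test RMSE: {}'.format(rmse.item()))
```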
github_jupyter
# MRCA estimation ------- You can access your data via the dataset number. For example, ``handle = open(get(42), 'r')``. To save data, write your data to a file, and then call ``put('filename.txt')``. The dataset will then be available in your galaxy history. Notebooks can be saved to Galaxy by clicking the large green button at the top right of the IPython interface.<br> More help and information can be found on the project [website](https://github.com/bgruening/galaxy-ipython). ## Inputs ------ This notebook expects two inputs from Galaxy history: 1. a comma separated list of accession numbers and corresponding collection dates 2. a phylogenetic tree (in newick format) in which OTU labels correspond to accession numbers from input 1 Here is an example of input 1: ``` Accession,Collection_Date MT049951,2020-01-17 MT019531,2019-12-30 MT019529,2019-12-23 MN975262,2020-01-11 MN996528,2019-12-30 MT019532,2019-12-30 MT019530,2019-12-30 MN994468,2020-01-22 ``` ``` # Set history items for datasets containing accession/dates and a maximum likelihood tree: # These numbers correspond to numbers of Galaxy datasets acc_date = 1 tree = 116 !pip install --upgrade pip==20.0.2 !pip install --upgrade statsmodels==0.11.0 !pip install --upgrade pandas==0.24.2 from Bio import Phylo as phylo from matplotlib import pyplot as plt import pandas as pd import datetime import statsmodels.api as sm import statsmodels.formula.api as smf %matplotlib inline # Get accessions and dates acc_path = get(acc_date) # Get ML tree tree_path = get(tree) !mv {acc_path} acc_date.csv !mv {tree_path} tree.nwk col_dates = pd.read_csv('acc_date.csv') col_dates tree = next( phylo.parse( 'tree.nwk', "newick" ) ) plt.rcParams['figure.figsize'] = [15, 50] phylo.draw( tree ) def root_to_tip( tree, date_df ): accum = [] def tree_walker( clade, total_branch_length ): for child in clade.clades: if child.is_terminal(): # is_terminal is a method and must be called if child.name is not None: date = date_df[date_df['Accession']==child.name]['Collection_Date'].to_string(index=False) accum.append( ( child.name, date, total_branch_length + child.branch_length ) ) tree_walker( child, total_branch_length + child.branch_length ) tree_walker( tree.clade, 0 ) return pd.DataFrame( accum, columns=["name","date","distance_to_root"] ) for clade in list( tree.find_clades() ): tree.root_with_outgroup( clade ) df = root_to_tip( tree, col_dates ) df['date'] = pd.to_datetime(df['date']) df['date_as_numeric'] = [d.year + (d.dayofyear-1)/365 for d in df['date']] plt.rcParams['figure.figsize'] = [15, 10] df.plot( x="date", y="distance_to_root" ) ``` ## MRCA timing is ... ``` import datetime def decimal_to_calendar (decimal): years = int (decimal) d = datetime.datetime (years, 1,1) + datetime.timedelta (days = int ((decimal-years)*365)) return d model = smf.ols(formula='distance_to_root ~ date_as_numeric ', data=df) results = model.fit() print( results.summary() ) print ("Root predicted at {}".format(decimal_to_calendar(-results.params.Intercept/results.params.date_as_numeric))) ```
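The slope of this regression is an estimate of the molecular clock rate (root-to-tip distance gained per year). As a follow-up sketch (assuming the `results` object from the OLS fit above), that rate can also be reported together with its 95% confidence interval:

```
ci = results.conf_int()                 # lower/upper bounds for each fitted parameter
rate = results.params.date_as_numeric
low, high = ci.loc['date_as_numeric']
print("Estimated rate: {:.6f} distance/year (95% CI: {:.6f} to {:.6f})".format(rate, low, high))
```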
github_jupyter
<div style="width:1000 px"> <div style="float:right; width:98 px; height:98px;"> <img src="https://raw.githubusercontent.com/Unidata/MetPy/master/metpy/plots/_static/unidata_150x150.png" alt="Unidata Logo" style="height: 98px;"> </div> <h1>Introduction to Pandas</h1> <h3>Unidata Python Workshop</h3> <div style="clear:both"></div> </div> <hr style="height:2px;"> ## Overview: * **Teaching:** 30 minutes * **Exercises:** 30 minutes ### Questions 1. What is Pandas? 1. What are the basic Pandas data structures? 1. How can I read data into Pandas? 1. What are some of the data operations available in Pandas? ### Objectives 1. <a href="#series">Data Series</a> 1. <a href="#frames">Data Frames</a> 1. <a href="#loading">Loading Data in Pandas</a> 1. <a href="#missing">Missing Data</a> 1. <a href="#manipulating">Manipulating Data</a> <a name="series"></a> ## Data Series Data series are one of the fundamental data structures in Pandas. You can think of them like a dictionary; they have a key (index) and value (data/values) like a dictionary, but also have some handy functionality attached to them. To start out, let's create a series from scratch. We'll imagine these are temperature observations. ``` from pandas import Series temperatures = Series([23, 20, 25, 18]) temperatures ``` The values on the left are the index (zero based integers by default) and on the right are the values. Notice that the data type is an integer. Any NumPy datatype is acceptable in a series. That's great, but it'd be more useful if the station were associated with those values. In fact you could say we want the values *indexed* by station name. ``` temperatures = Series([23, 20, 25, 18], index=['TOP', 'OUN', 'DAL', 'DEN']) temperatures ``` Now, very similar to a dictionary, we can use the index to access and modify elements. ``` temperatures['DAL'] temperatures[['DAL', 'OUN']] ``` We can also do basic filtering, math, etc. ``` temperatures[temperatures > 20] temperatures + 2 ``` Remember how I said that series are like dictionaries? We can create a series striaght from a dictionary. ``` dps = {'TOP': 14, 'OUN': 18, 'DEN': 9, 'PHX': 11, 'DAL': 23} dewpoints = Series(dps) dewpoints ``` It's also easy to check and see if an index exists in a given series: ``` 'PHX' in dewpoints 'PHX' in temperatures ``` Series have a name attribute and their index has a name attribute. ``` temperatures.name = 'temperature' temperatures.index.name = 'station' temperatures ``` <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li>Create a series of pressures for stations TOP, OUN, DEN, and DAL (assign any values you like).</li> <li>Set the series name and series index name.</li> <li>Print the pressures for all stations which have a dewpoint below 15.</li> </ul> </div> ``` # Your code goes here ``` <button data-toggle="collapse" data-target="#sol1" class='btn btn-primary'>View Solution</button> <div id="sol1" class="collapse"> <code><pre> pressures = Series([1012.1, 1010.6, 1008.8, 1011.2], index=['TOP', 'OUN', 'DEN', 'DAL']) pressures.name = 'pressure' pressures.index.name = 'station' print(pressures[dewpoints < 15]) </pre></code> </div> <a href="#top">Top</a> <hr style="height:2px;"> <a name="frames"></a> ## Data Frames Series are great, but what about a bunch of related series? Something like a table or a spreadsheet? Enter the data frame. A data frame can be thought of as a dictionary of data series. They have indexes for their rows and their columns. Each data series can be of a different type , but they will all share a common index. 
The easiest way to create a data frame by hand is to use a dictionary. ``` from pandas import DataFrame data = {'station': ['TOP', 'OUN', 'DEN', 'DAL'], 'temperature': [23, 20, 25, 18], 'dewpoint': [14, 18, 9, 23]} df = DataFrame(data) df ``` You can access columns (data series) using dictionary type notation or attribute type notation. ``` df['temperature'] df.dewpoint ``` Notice the index is shared and that the name of the column is attached as the series name. You can also create a new column and assign values. If we pass only a scalar, it is duplicated for every row. ``` df['wspeed'] = 0. df ``` Let's set the index to be the station. ``` df.index = df.station df ``` Well, that's close, but we now have a redundant column, so let's get rid of it. ``` df.drop('station', 1, inplace=True) df ``` Now let's get a row from the dataframe instead of a column. ``` df.loc['DEN'] ``` We can even transpose the data easily if we needed that to make things easier to merge/munge later. ``` df.T ``` Look at the `values` attribute to access the data as a 1D or 2D array for series and data frames respectively. ``` df.values df.temperature.values ``` <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li>Add a series of rain observations to the existing data frame.</li> <li>Apply an instrument correction of -2 to the dewpoint observations.</li> </ul> </div> ``` # Your code goes here ``` <button data-toggle="collapse" data-target="#sol2" class='btn btn-primary'>View Solution</button> <div id="sol2" class="collapse"> <code><pre> df['rain'] = [0, 0.4, 0.2, 0] df.dewpoint = df.dewpoint - 2 df </pre></code> </div> <a href="#top">Top</a> <hr style="height:2px;"> <a name="loading"></a> ## Loading Data in Pandas The real power of pandas is in manipulating and summarizing large sets of tabular data. To do that, we'll need a large set of tabular data. We've included a file in this directory called `JAN17_CO_ASOS.txt` that has all of the ASOS observations for several stations in Colorado for January of 2017. It's a few hundred thousand rows of data in a tab delimited format. Let's load it into Pandas. ``` import pandas as pd df = pd.read_table('Jan17_CO_ASOS.txt') df.head() df = pd.read_table('Jan17_CO_ASOS.txt', parse_dates=['valid']) df.head() df = pd.read_table('Jan17_CO_ASOS.txt', parse_dates=['valid'], na_values='M') df.head() ``` Let's look in detail at those column names. Turns out we need to do some cleaning of this file. Welcome to real world data analysis. ``` df.columns df.columns = ['station', 'time', 'temperature', 'dewpoint', 'pressure'] df.head() ``` For other data formats (CSV, fixed width, etc.) there are tools to read them as well. You can even read Excel files straight into Pandas. <a href="#top">Top</a> <hr style="height:2px;"> <a name="missing"></a> ## Missing Data We've already dealt with some missing data by turning the 'M' string into actual NaN's while reading the file in. We can do one better though and delete any rows that have all values missing. There are similar operations that could be performed for columns. You can even drop if any values are missing, all are missing, or just those you specify are missing. ``` len(df) df.dropna(axis=0, how='all', subset=['temperature', 'dewpoint', 'pressure'], inplace=True) len(df) df.head() ``` <div class="alert alert-success"> <b>EXERCISE</b>: Create a new data frame called df2 that contains only the rows that have temperature, dewpoint, and pressure observations all present.
</div> ``` # Your code goes here # df2 = ``` <button data-toggle="collapse" data-target="#sol3" class='btn btn-primary'>View Solution</button> <div id="sol3" class="collapse"> <code><pre> df2 = df.dropna(how='any') df2 </pre></code> </div> Lastly, we still have the original index values. Let's reindex to a new zero-based index for only the rows that have valid data in them. ``` df.reset_index(drop=True, inplace=True) df.head() ``` <a href="#top">Top</a> <hr style="height:2px;"> <a name="manipulating"></a> ## Manipulating Data We can now take our data and do some interesting things with it. Let's start with a simple min/max. ``` print('Min: {}\nMax: {}'.format(df.temperature.min(), df.temperature.max())) ``` You can also do some useful statistics on data with attached methods like `corr` for the correlation coefficient. ``` df.temperature.corr(df.dewpoint) ``` We can also call a `groupby` on the data frame to start getting some summary information for each station. ``` df.groupby('station').mean() ``` <div class="alert alert-success"> <b>EXERCISE</b>: Calculate the min, max, and standard deviation of the temperature field grouped by each station. </div> ``` # Calculate min # Calculate max # Calculate standard deviation ``` <button data-toggle="collapse" data-target="#sol4" class='btn btn-primary'>View Solution</button> <div id="sol4" class="collapse"> <code><pre> print(df.groupby('station').min()) print(df.groupby('station').max()) print(df.groupby('station').std()) </pre></code> </div> Now, let me show you how to do all of that and more in a single call. ``` df.groupby('station').describe() ``` Now let's suppose we're going to make a meteogram or similar and want to get all of the data for a single station. ``` df.groupby('station').get_group('0CO').head().reset_index(drop=True) ``` <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li>Round the temperature column to whole degrees.</li> <li>Group the observations by temperature and use the count method to see how many instances of the rounded temperatures there are in the dataset.</li> </ul> </div> ``` # Your code goes here ``` <button data-toggle="collapse" data-target="#sol5" class='btn btn-primary'>View Solution</button> <div id="sol5" class="collapse"> <code><pre> df.temperature = df.temperature.round() df.groupby('temperature').count() </pre></code> </div> <a href="#top">Top</a> <hr style="height:2px;">
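As mentioned in the Loading Data section, Pandas can also read other file formats directly. A minimal sketch (the file names here are hypothetical, not files shipped with this workshop):

```
import pandas as pd

df_csv = pd.read_csv('observations.csv')                      # comma separated values
df_fixed = pd.read_fwf('observations.txt')                    # fixed-width columns
df_excel = pd.read_excel('observations.xlsx', sheet_name=0)   # first sheet of an Excel workbook
```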
github_jupyter
# Simulation iteration Let $A$ be an $m \times m$ real symmetric matrix with eigenvalue decomposition: $\newcommand{\ffrac}{\displaystyle \frac} \newcommand{\Tran}[1]{{#1}^{\mathrm{T}}}A = Q \Lambda \Tran{Q}$, where the eigenvalues of $A$ ar ordered as $\left| \lambda_1 \right| > \left| \lambda_2 \right| > \left| \lambda_3 \right| \geq \cdots \geq \left| \lambda_m \right|$ with corresponding orthogonal orthonormal eigenvectors $\vec{q}_1, \vec{q}_2, \dots, \vec{q}_m$, the columns of matrix $Q$. Now we try to obtain the two largest eigenvalues $\lambda_1, \lambda_2$ and their corresponding eigenvectors. We start from $\vec{e}_1, \vec{e}_2$ to do the power iteration. $$\vec{e}_1 = \sum_{i=1}^{m} \alpha_i \vec{q}_i, \vec{e}_2 = \sum_{i=1}^{m} \beta_i \vec{q}_i$$ So after we assume that $\alpha_1 \neq 0$, as $k \to \infty$, $\left\| \ffrac{ A^k \vec{e}_1} {\alpha_1 \lambda_1^k} \right\| \to \left\| \vec{q}_1 \right\|$,$\left\| \ffrac{ A^k \vec{e}_2} {\beta_1 \lambda_1^k} \right\| \to \left\| \vec{q}_1 \right\|$ with convergence speed the ratio of $\ffrac{\lambda_2} {\lambda_1}$; also seeing from $\beta_1 A^k \vec{e}_1 - \alpha_1 A^k \vec{e}_2$, if we also assume that $\ffrac{\alpha_2} {\alpha_1} \neq \ffrac{\beta_2} {\beta_1}$, $i.e.$, $\left| \begin{array}{cc} \alpha_1 & \alpha_2 \\ \beta_1 & \beta_2 \end{array} \right| \neq 0$, as $k \to \infty$, in term of spanned space: $\left \langle A^k\vec{e}_1, A^k\vec{e}_2 \right \rangle \to \left \langle \vec{q}_1, \vec{q}_2 \right \rangle$. Now from $QR$ factorization, we have $$A^k = \left[\begin{array}{cccc} A^k\vec{e}_1 & A^k \vec{e}_2 & \cdots A^k \vec{e}_m \end{array} \right] = Q^{(k)} R^{(k)} \Rightarrow \left[\begin{array}{cc} A^k\vec{e}_1 & A^k \vec{e}_2 \end{array} \right] = \underline{Q}^{(k)}\underline{R}^{(k)} = \left[\begin{array}{cc} \underline{\vec{q}}_1^{(k)} & \underline{\vec{q}}_2^{(k)} \end{array} \right]\underline{R}^{(k)}$$ and as $k \to \infty$ we also have $\underline{\vec{q}}_i^{(k)} \to \vec{q}_i$. And we can generalize this method to obtain all the eigenvalues and eigenvectors of $A$. We first assume that $\left| \lambda_1 \right| > \left| \lambda_2 \right| > \cdots > \left| \lambda_m \right|$. And express the unit vector as the linear combination of $\vec{q}_1, \vec{q}_2, \dots , \vec{q}_m$: $\vec{e}_i = \displaystyle\sum_{j=1}^{m} z_{ij} \vec{q}_j$, $i.e.$, $$I = \left[\begin{array}{cccc} \vec{q}_1 & \vec{q}_2 & \cdots \vec{q}_m \end{array} \right] \Tran{\left[ \begin{array}{cccc} z_{11} & z_{12} & \cdots & z_{1m} \\ z_{21} & z_{22} & \cdots & z_{2m} \\ \vdots & \vdots & \ddots & \vdots \\ z_{m1} & z_{m2} & \cdots & z_{mm} \\ \end{array}\right]} = Q \Tran{Q}$$ which require that $\left| \begin{array}{cccc} z_{11} & z_{12} & \cdots & z_{1i} \\ z_{21} & z_{22} & \cdots & z_{2i} \\ \vdots & \vdots & \ddots & \vdots \\ z_{i1} & z_{i2} & \cdots & z_{ii} \\ \end{array}\right| \neq 0$ for $i = 1 , 2 , \dots , m$, $i.e.$, we assume that all the **leading principal minors of the matrix** $Q$ are nonsingular. And actually we have $\left[ \begin{array}{cccc} z_{11} & z_{12} & \cdots & z_{1m} \\ z_{21} & z_{22} & \cdots & z_{2m} \\ \vdots & \vdots & \ddots & \vdots \\ z_{m1} & z_{m2} & \cdots & z_{mm} \\ \end{array}\right] = Q$. 
Then as $k \to \infty$, we have for $j = 1, 2, \dots, m$ $$\left \langle A^k \vec{e}_1, A^k \vec{e}_2, \cdots, A^k \vec{e}_j \right \rangle \longrightarrow \left \langle \vec{q}_1, \vec{q}_2, \cdots, \vec{q}_j \right \rangle$$ Thus, each $\vec{q}_j$ is the limit of the component in $A^k \vec{e}_j$ orthogonal to the space $\left \langle A^k \vec{e}_1, A^k \vec{e}_2, \cdots, A^k \vec{e}_{j-1} \right \rangle$. And such orthogonal components can be obtained from the $QR$ factorization of the matrix $A^k$. $$A^k = \left[\begin{array}{cccc} A^k\vec{e}_1 & A^k \vec{e}_2 & \cdots A^k \vec{e}_m \end{array} \right] = \underline{Q}^{(k)} \underline{R}^{(k)} = \left[ \begin{array}{cccc} \underline{\vec{q}}_1^{(k)} & \underline{\vec{q}}_2^{(k)} & \cdots & \underline{\vec{q}}_m^{(k)} \end{array} \right] \underline{R}^{(k)}$$ And then as $k \to \infty$, $\underline{\vec{q}}_j^{(k)} \to \vec{q}_j$ for all possible $j$ with speed of each a ratio of $\left| \ffrac{\lambda_{j+1}} {\lambda_{j}}\right|$. In conclusion, for any real symmetric matrix $A$, as long as all its eigenvalues are *distinct* and all *leading principal minors* of the matrix $Q$ are nonsingular, then the orthogonal matrix $\underline{Q}^{(k)}$ from the $QR$ decomposition of $A^k$ converges to $Q$, then we can use Rayleigh quotient to find the corresponding eigenvalues. $Algorithm$ Given a real symmetric matrix $A$, we apply the **Simultaneous iteration** ``` AK = A for k = 1:n AK = A * AK end Q*R = AK ``` However, this method is not very stable. Since all columns of $A^K$ will converges to $\vec{q}_1$, so the condition number will get larger and larger with increasing $k$. Here's a more stable one. ``` AK = A for k = 1:n Q * R = AK AK = A * Q end ``` In addition, it's also hard to determine the convergence of the algorithm. See another one below. # QR algorithm without shift $Algorithm$ $QR$ algorithm without shift ```MATLAB A^{(0)} = A for k = 1:until convergence Q^{(k)} * R^{(k)} = A^{(k − 1)} A^{(k)} = R^{(k)} * Q^{(k)} end ``` For this algorithm, $A^{(k)}$ will converges to $\Lambda$. And what we gonna prove is for $k = 1, 2, \dots $, two equations hold. $$ A^k = Q^{(1)} Q^{(2)} \cdots Q^{(k)} R^{(k)} \cdots R^{(2)} R^{(1)} := \underline{Q}^{(k)}\underline{R}^{(k)} \\ A^{(k)}:=\Tran{\left( Q^{(1)} Q^{(2)} \cdots Q^{(k)} \right)} A \left( Q^{(1)} Q^{(2)} \cdots Q^{(k)} \right) $$ $Proof$ $$ A^1 = A^{(0)} = Q^{(1)} R^{(1)} := \underline{Q}^{(1)}\underline{R}^{(1)} \\ A^{(1)}:= R^{(1)} Q^{(1)} = \Tran{\left( Q^{(1)} \right)}Q^{(1)}R^{(1)} \left( Q^{(1)} \right) = \Tran{\left( Q^{(1)} \right)} A^{(0)} \left( Q^{(1)} \right) = \Tran{\left( Q^{(1)} \right)}A \left( Q^{(1)} \right) \\ Q^{(2)}R^{(2)} = A^{(1)} \\ A^{(2)}:= R^{(2)} Q^{(2)} = \Tran{\left( Q^{(2)} \right)}Q^{(2)}R^{(2)} \left( Q^{(2)} \right) = \Tran{\left( Q^{(2)} \right)} A^{(1)} \left( Q^{(2)} \right) = \Tran{\left( Q^{(2)} \right)} \Tran{\left( Q^{(1)} \right)}A \left( Q^{(1)} \right) \left( Q^{(2)} \right)\\ A^2 = A\left( Q^{(1)}R^{(1)} \right) = Q^{(1)}\left( \Tran{Q^{(1)}}A^{(1)}Q^{(1)} \right) R^{(1)} = Q^{(1)}\left( A^{(2)} \right) R^{(1)} = Q^{(1)}Q^{(2)}R^{(2)}R^{(1)} := \underline{Q}^{(2)}\underline{R}^{(2)} $$ Times after times of iteration, we have $$ A^{(k)} = \Tran{{\underline{Q}}^{(k)}} A {\underline{Q}}^{(k)}\\ A^k = \underline{Q}^{(k)}\underline{R}^{(k)} $$ so as $k \to \infty$ we have $\Tran{Q} A Q = \Lambda$. 
Here's the theorem $Theorem$ Let $A$ be a real symmetric matrix with eigenvalue decomposition $A = Q \Lambda \Tran{Q}$, and $\left| \lambda_1 \right| > \left| \lambda_2 \right| > \cdots > \left| \lambda_m \right|$. Assume that all the leading principal minors of $Q$ are nonsingular. Then from the $QR$ algorithm, $A^{(k)}$ converges to $\Lambda$, as $k \to \infty$, with a linear convergence rate determined by $\max\limits_{1 \leq j < m} \ffrac{\left| \lambda_{j+1} \right|} {\left| \lambda_{j} \right|}$ # Deflation in the implementation of QR algorithm So actually the sequence of $A^{(k)}$ will converge to $\Lambda$ with entries other than on the diagonal close to $0$, at least less than a certain tolerance value, $\varepsilon$. $Algorithm$ ```MATLAB function [ Anew ] = qralg( A ) l = length( A ); while ( | A(l,l-1) | > varepsilon ) [ Q, R ] = qr( A ); A = R * Q; end Anew = A; end function [ eigens ] = qreigens( A ) for k = m:-1:2 Anew = qralg( A ); eigens( k ) = Anew( k,k ); A = Anew(1:k-1 , 1:k-1 ); end eigens(1) = A(1,1); end ``` So each time we obtain an eigenvalue in the lower-right corner of the matrix, we deflate to a matrix of one less dimension, until all the eigenvalues are found. # QR algorithm with shift Compared with power iteration, the inverse iteration and the Rayleigh quotient iteration converge faster when the *shift value* is sufficiently close to a true eigenvalue of $A$. Likewise, we can introduce a shift into the $QR$ algorithm and make it converge faster. See the algorithm first $$A^{k} = \underline{Q}^{(k)} \underline{R}^{(k)} \Rightarrow A^{-k} = \left( \underline{R}^{(k)} \right)^{-1} \left( \underline{Q}^{(k)} \right)^{-1} = \left( \underline{R}^{(k)} \right)^{-1} \Tran{\left( \underline{Q}^{(k)} \right)} \\ $$ Then since $A$ is symmetric, we have $\left( \Tran{A} \right)^{-k} = A^{-k} = \left( \underline{Q}^{(k)} \right) \left( \Tran{ \underline{R}^{(k)}} \right)^{-1}$. Notice that here $\left( \Tran{ \underline{R}^{(k)}} \right) ^{-1}$ is a lower triangular matrix. Then we denote by $P$ the $m$ by $m$ permutation matrix $P = \left[ \begin{array}{cccc} & & & 1 \\ & & 1 & \\ & \ddots & & \\ 1 & & & \end{array}\right] = \left[ \begin{array}{cccc} \vec{e}_m & \vec{e}_{m-1} & \cdots & \vec{e}_1 \end{array}\right]$. Then we have $$A^{-k}P = \left( \underline{Q}^{(k)}P \right) P\left( \Tran{ \underline{R}^{(k)}} \right)^{-1}P$$ Denoting $P\left( \Tran{ \underline{R}^{(k)}} \right)^{-1}P$ by $\tilde{\underline{R}}^{(k)}$, we have the following formula $$\left( A^{-1} \right)^{k} \left[ \begin{array}{cccc} \vec{e}_m & \vec{e}_{m-1} & \cdots & \vec{e}_1 \end{array}\right] = \left[ \begin{array}{cccc} \underline{\vec{q}}_m^{(k)} & \underline{\vec{q}}_{m-1}^{(k)} & \cdots & \underline{\vec{q}}_1^{(k)} \end{array}\right]\tilde{\underline{R}}^{(k)} $$ And the $QR$ algorithm with a constant shift value $\mu$ is as follows. $Algorithm$ ``` A^{(0)} = A; for k = 1:n Q^{(k)} * R^{(k)} = A^{(k-1)} - \mu * I; A^{(k)} = R^{(k)} Q^{(k)} + \mu * I; end ``` This is essentially the $QR$ algorithm on the matrix $A - \mu I$.
So similarly we have $$ \left( A - \mu I \right)^k = Q^{(1)} Q^{(2)} \cdots Q^{(k)} R^{(k)} \cdots R^{(2)} R^{(1)} := \underline{Q}^{(k)}\underline{R}^{(k)} \\ A^{(k)}:=\Tran{\left( Q^{(1)} Q^{(2)} \cdots Q^{(k)} \right)} A \left( Q^{(1)} Q^{(2)} \cdots Q^{(k)} \right) $$ Then written in terms of the inverse iteration, we have $$\left( \left( A-\mu I \right)^{-1} \right)^{k} \left[ \begin{array}{cccc} \vec{e}_m & \vec{e}_{m-1} & \cdots & \vec{e}_1 \end{array}\right] = \left[ \begin{array}{cccc} \underline{\vec{q}}_m^{(k)} & \underline{\vec{q}}_{m-1}^{(k)} & \cdots & \underline{\vec{q}}_1^{(k)} \end{array}\right]\tilde{\underline{R}}^{(k)} $$ Still the $\underline{\vec{q}}_{m}^{(k)}$ will converge to the eigenvalues of $A$, but the speed is now determined by the ratio $\ffrac{\left| \lambda_m - \mu \right|} {\left| \lambda_{m - 1} - \mu \right|}$, and we can even update the shift value $\mu$ in each iteration as we did before in the Rayleigh quotient iteration $Algorithm$ ``` A^{(0)} = A; for k = 1:n Q^{(k)} R^{(k)} = A^{(k-1)} - \mu^{(k)} * I A^{(k)} = R^{(k)} Q^{(k)} + \mu^{(k)} * I end ``` *** But how to find that shift value, and how to update each time? We want the $\mu^{(k)}$ are sufficiently close to $\lambda_m$, so when the convergence to $\lambda_m$ is achieved, we not only deflate the matrix to a smaller one but also shift the value of $\mu^{(k)} = A^{(k-1)}(m,m)$. Here's the explanation: From Rayleigh quotient, $\Tran{ \underline{\vec{q}}_m^{(k-1)} }A\underline{\vec{q}}_m^{(k-1)}$ and since $A^{(k-1)} = \Tran{ \left( \underline{Q}^{(k-1)} \right) } A \underline{Q}^{(k-1)}$, $$\Tran{ \underline{\vec{q}}_m^{(k-1)} }A\underline{\vec{q}}_m^{(k-1)} = \left( \Tran{\vec{e}_{m}} \Tran{ \left( \underline{Q}^{(k-1)} \right) } \right) A \left( \underline{Q}^{(k-1)}\vec{e}_{m} \right) = \Tran{\vec{e}_{m}}A^{(k-1)}\vec{e}_{m} = A^{(k-1)}(m,m) := \mu^{(k)}$$ And there's a better shift: *Wilkinson shift*, defined as follows: the one eigenvalue of the lower-rightmost $2 \times 2$ submatrix of $A^{(k-1)}$ that is closer to $A^{(k-1)}_{m,m}$. $$\left[ \begin{array}{cc} A^{(k-1)}_{m-1,m-1} & A^{(k-1)}_{m-1,m} \\[1em] A^{(k-1)}_{m,m-1} & A^{(k-1)}_{m,m} \end{array} \right]$$ And we can write that as $$\mu^{(k)} = A^{(k-1)}_{m,m} - \DeclareMathOperator*{\sign}{sign} \frac{ \sign \left( \delta \right) {A^{(k-1)} _{m,m-1}}^2} { \left| \delta \right| + \sqrt{ \delta^2 + {A^{(k-1)} _{m,m-1}}^2} } \\[1em] \delta = \frac{\left( A^{(k-1)} _{m-1,m-1} - A^{(k-1)} _{m,m} \right)} {2}$$
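To make the pseudocode above concrete, here is a small NumPy sketch (my own addition, not from the original notes) of the unshifted QR algorithm for a real symmetric matrix; the iterates $A^{(k)}$ approach a diagonal matrix of eigenvalues and the accumulated $Q$ factors approach the eigenvector matrix:

```
import numpy as np

def qr_algorithm(A, tol=1e-10, max_iter=10000):
    """Unshifted QR iteration for a real symmetric matrix A."""
    Ak = np.array(A, dtype=float)
    Q_total = np.eye(Ak.shape[0])
    for _ in range(max_iter):
        Q, R = np.linalg.qr(Ak)
        Ak = R @ Q                      # A^{(k)} = R^{(k)} Q^{(k)}
        Q_total = Q_total @ Q           # accumulate Q^{(1)} Q^{(2)} ... Q^{(k)}
        if np.max(np.abs(Ak - np.diag(np.diag(Ak)))) < tol:
            break
    return np.diag(Ak), Q_total         # approximate eigenvalues and eigenvectors

rng = np.random.default_rng(0)
M = rng.normal(size=(5, 5)); M = (M + M.T) / 2
vals, vecs = qr_algorithm(M)
print(np.sort(vals))
print(np.sort(np.linalg.eigvalsh(M)))   # should agree closely
```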
github_jupyter
# Bias Removal Climate models can have biases relative to different verification datasets. Commonly, biases are removed by postprocessing before verification of forecasting skill. `climpred` provides convenience functions to do so. ``` import climpred import xarray as xr import matplotlib.pyplot as plt from climpred import HindcastEnsemble hind = climpred.tutorial.load_dataset('CESM-DP-SST') # CESM-DPLE hindcast ensemble output. obs = climpred.tutorial.load_dataset('ERSST') # observations hind["lead"].attrs["units"] = "years" ``` We begin by removing a mean climatology for the observations, since `CESM-DPLE` generates its anomalies over this same time period. ``` obs = obs - obs.sel(time=slice('1964', '2014')).mean('time') hindcast = HindcastEnsemble(hind) hindcast = hindcast.add_observations(obs) hindcast.plot() ``` The warming of the `observations` is similar to `initialized`. ## Mean bias removal Typically, bias depends on lead time and should therefore also be removed as a function of lead time. ``` bias = hindcast.verify(metric='bias', comparison='e2o', dim=[], alignment='same_verifs') bias.SST.plot() ``` Against `observations`, there is a small cold bias in the 1980 and 1990 initialization years and a warm bias before and after. ``` # lead-time dependent mean bias over all initializations is quite small but negative mean_bias = bias.mean('init') mean_bias.SST.plot() ``` ### Cross Validation To remove the mean bias quickly, the mean bias over all initializations is subtracted. For formally correct bias removal with cross validation, the given initialization is left out when subtracting the mean bias. `climpred` wraps these functions in `HindcastEnsemble.remove_bias(how='mean', cross_validate={bool})`. ``` hindcast.remove_bias(how='mean', cross_validate=True, alignment='same_verifs').plot() plt.title('hindcast lead timeseries with unconditional mean bias removed') plt.show() ``` ## Skill Distance-based accuracy metrics (`mse`, `rmse`, `nrmse`, ...) are sensitive to mean bias removal. Correlation metrics (`pearson_r`, `spearman_r`) are insensitive to bias correction. ``` metric='rmse' hindcast.verify(metric=metric, comparison='e2o', dim='init', alignment='same_verifs')['SST'].plot(label='no bias correction') hindcast.remove_bias(cross_validate=False, alignment='same_verifs') \ .verify(metric=metric, comparison='e2o', dim='init', alignment='same_verifs').SST.plot(label='bias correction without cross validation') hindcast.remove_bias(cross_validate=True, alignment='same_verifs') \ .verify(metric=metric, comparison='e2o', dim='init', alignment='same_verifs').SST.plot(label='formally correct bias correction with cross validation') plt.legend() plt.title(f"{metric.upper()} SST evaluated against observations") plt.show() ```
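To spell out what `remove_bias(how='mean', cross_validate=True)` is doing conceptually, here is a rough plain-xarray sketch (my own illustration, not climpred's actual implementation) of leave-one-out mean-bias removal, reusing the raw `hind` dataset and the `bias` field computed above, which is assumed to carry `init` and `lead` dimensions:

```
import xarray as xr

corrected = []
for init in bias.init.values:
    # mean bias per lead, computed from all *other* initializations
    mean_bias_loo = bias.drop_sel(init=init).mean('init')
    corrected.append(hind.sel(init=init) - mean_bias_loo)
corrected = xr.concat(corrected, dim='init')
```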
github_jupyter
<a href="https://colab.research.google.com/github/yukinaga/minnano_ai/blob/master/section_7/ml_libraries.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Machine Learning Libraries This notebook introduces code for the machine learning libraries Keras and PyTorch. We will not explain the code in detail this time; the goal is to get a feel for the overall flow of an implementation. ## ● Keras code The code below is a simple neural network implemented with Keras. It classifies each Iris flower as Setosa or Versicolor. The model is built with `Sequential`, adding layers and activation functions. ``` import numpy as np from sklearn import datasets from sklearn import preprocessing from sklearn.model_selection import train_test_split iris = datasets.load_iris() iris_data = iris.data sl_data = iris_data[:100, 0] # Setosa and Versicolor, Sepal length sw_data = iris_data[:100, 1] # Setosa and Versicolor, Sepal width # Center the mean at 0 sl_ave = np.average(sl_data) # mean sl_data -= sl_ave # subtract the mean sw_ave = np.average(sw_data) sw_data -= sw_ave # Store the inputs in lists input_data = [] correct_data = [] for i in range(100): input_data.append([sl_data[i], sw_data[i]]) correct_data.append([iris.target[i]]) # Split into training and test data input_data = np.array(input_data) # convert to a NumPy array correct_data = np.array(correct_data) x_train, x_test, t_train, t_test = train_test_split(input_data, correct_data) # ------ Keras code starts here ------ from keras.utils import np_utils from keras.models import Sequential from keras.layers import Dense, Activation from keras.optimizers import SGD model = Sequential() model.add(Dense(2, input_dim=2)) # inputs: 2, hidden-layer neurons: 2 model.add(Activation("sigmoid")) # sigmoid function model.add(Dense(1)) # output-layer neurons: 1 model.add(Activation("sigmoid")) # sigmoid function model.compile(optimizer=SGD(lr=0.3), loss="mean_squared_error", metrics=["accuracy"]) model.fit(x_train, t_train, epochs=32, batch_size=1) # training loss, accuracy = model.evaluate(x_test, t_test) print("Accuracy: " + str(accuracy*100) + "%") ``` ## ● PyTorch code The code below is a simple neural network implemented with PyTorch. It classifies each Iris flower as Setosa or Versicolor. As with Keras, the model is built with `Sequential`, stacking layers and activation functions. In PyTorch, the inputs and targets must be converted to Tensors. ``` import numpy as np from sklearn import datasets from sklearn import preprocessing from sklearn.model_selection import train_test_split iris = datasets.load_iris() iris_data = iris.data sl_data = iris_data[:100, 0] # Setosa and Versicolor, Sepal length sw_data = iris_data[:100, 1] # Setosa and Versicolor, Sepal width # Center the mean at 0 sl_ave = np.average(sl_data) # mean sl_data -= sl_ave # subtract the mean sw_ave = np.average(sw_data) sw_data -= sw_ave # Store the inputs in lists input_data = [] correct_data = [] for i in range(100): input_data.append([sl_data[i], sw_data[i]]) correct_data.append([iris.target[i]]) # Split into training and test data input_data = np.array(input_data) # convert to a NumPy array correct_data = np.array(correct_data) x_train, x_test, t_train, t_test = train_test_split(input_data, correct_data) # ------ PyTorch code starts here ------ import torch from torch import nn from torch import optim # Convert to Tensors x_train = torch.tensor(x_train, dtype=torch.float32) t_train = torch.tensor(t_train, dtype=torch.float32) x_test = torch.tensor(x_test, dtype=torch.float32) t_test = torch.tensor(t_test, dtype=torch.float32) net = nn.Sequential( nn.Linear(2, 2), # inputs: 2, hidden-layer neurons: 2 nn.Sigmoid(), # sigmoid function nn.Linear(2, 1), # output-layer neurons: 1 nn.Sigmoid() # sigmoid function ) loss_fnc = nn.MSELoss() optimizer = optim.SGD(net.parameters(), lr=0.3) # Train for 1000 epochs for i in range(1000): # Reset gradients to 0 optimizer.zero_grad() # Forward pass y_train = net(x_train) y_test = net(x_test) # Compute the losses loss_train = loss_fnc(y_train, t_train) loss_test = loss_fnc(y_test, t_test) # Backpropagation (compute gradients) loss_train.backward() # Update the parameters optimizer.step() if i%100
== 0: print("Epoch:", i, "Loss_Train:", loss_train.item(), "Loss_Test:", loss_test.item()) y_test = net(x_test) count = ((y_test.detach().numpy()>0.5) == (t_test.detach().numpy()==1.0)).sum().item() print("Accuracy: " + str(count/len(y_test)*100) + "%") ```
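Both snippets above train on the full training set at once, which is fine for this tiny dataset. For larger datasets you would typically switch to minibatch training; here is a minimal PyTorch sketch (my own addition, reusing `net`, `loss_fnc`, `optimizer`, `x_train`, and `t_train` from the code above):

```
from torch.utils.data import TensorDataset, DataLoader

# Wrap the training tensors in a DataLoader that yields shuffled minibatches
loader = DataLoader(TensorDataset(x_train, t_train), batch_size=16, shuffle=True)

for epoch in range(100):
    for xb, tb in loader:
        optimizer.zero_grad()
        loss = loss_fnc(net(xb), tb)
        loss.backward()
        optimizer.step()
```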
github_jupyter
``` import tqdm from tqdm import tqdm_notebook import time import numpy as np import matplotlib import matplotlib.pyplot as plt import mpl_toolkits.mplot3d.axes3d as axes3d import matplotlib.ticker as ticker # import warnings # warnings.filterwarnings('ignore') import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import os # fonts for ICML def SetPlotRC(): #If fonttype = 1 doesn't work with LaTeX, try fonttype 42. plt.rc('pdf',fonttype = 42) plt.rc('ps',fonttype = 42) SetPlotRC() def get_df(num_models, num_exper, file_names, model_names, y_axe, y_axe_name, title, num_show = []): acc_num = 3 dist_calc_num = 7 df = [] for i in range(len(file_names)): file_name = os.path.expanduser(file_names[i]) data = np.genfromtxt(file_name, dtype=('U10','U10','U10',float,'U10',int,'U10',int,'U10',float)).tolist() cur_line = -1 for mod in range(num_models[i]): for j in range(num_exper[i]): cur_line += 1 if y_axe == 11: df.append([1.00001 - data[cur_line][acc_num], 1 / data[cur_line][y_axe], model_names[i][mod], title]) else: df.append([1.00001 - data[cur_line][acc_num], data[cur_line][y_axe], model_names[i][mod], title]) df = pd.DataFrame(df, columns=["Error = 1 - Recall@1", y_axe_name, "algorithm", "title"]) # print(df.shape) if len(num_show) > 0: it = 0 itt = 0 num_for_iloc = [] model_names_list = [] for i in range(len(file_names)): for mod in range(len(model_names[i])): model_names_list.append(model_names[i][mod]) allowed_set = set() same_dict = dict() for i in range(len(file_names)): for mod in range(len(model_names[i])): if itt in num_show: allowed_set.add(model_names_list[i]) for j in range(num_exper[i]): num_for_iloc.append(it) it += 1 else: it += num_exper[i] itt += 1 df = df.iloc[num_for_iloc] return df def show_results(frames, title, y_axe_name, x_log=True, y_log=False, dims=(18, 12), save=False, file_name='trash'): size = len(frames) ylim = [[500, 5000], [0, 1000],[0, 1000],[0, 1000]] a4_dims = dims fig, axs = plt.subplots(2, 2, figsize=a4_dims) for i in range(2): for j in range(2): num = i * 2 + j if i + j == 2: sns.lineplot(x="Error = 1 - Recall@1", y=y_axe_name,hue="algorithm", markers=True, style="algorithm", dashes=False, data=frames[num], ax=axs[i, j], linewidth=3, ms=15) else: sns.lineplot(x="Error = 1 - Recall@1", y=y_axe_name,hue="algorithm", markers=True, style="algorithm", dashes=False, data=frames[num], ax=axs[i, j], legend=False, linewidth=3, ms=15) axs[i, j].set_title(title[num], size='30') lx = axs[i, j].get_xlabel() ly = axs[i, j].get_ylabel() axs[i, j].set_xlabel(lx, fontsize=25) axs[i, j].set_ylabel(ly, fontsize=25) axs[i, j].tick_params(axis='both', which='both', labelsize=25) axs[i, j].set_ymargin(0.075) if i == 0: axs[i, j].set_xlabel('') if j == 1: axs[i, j].set_ylabel('') # ApplyFont(axs[i,j]) plt.legend(loc=2, bbox_to_anchor=(1.05, 1, 0.5, 0.5), fontsize='30', markerscale=3, borderaxespad=0.) 
if y_log: for i in range(2): for j in range(2): axs[i, j].set(yscale="log") if x_log: for i in range(2): for j in range(2): axs[i, j].set(xscale="log")# num_exper = [6, 6, 3] if save: fig.savefig(file_name + ".pdf", bbox_inches='tight') path = '~/Desktop/results/synthetic_n_10_6_d_' # path = '~/results/synthetic_n_10_6_d_' y_axe = 7 y_axe_name = "dist calc" model_names = [['kNN', 'kNN + Kl', 'kNN + Kl + llf', 'kNN + beam', 'kNN + beam + Kl + llf']] num_show = [0, 1, 2, 3, 4] num_exper = [5] num_models = [5] file_names = [path + '3.txt'] df_2 = get_df(num_models, num_exper, file_names, model_names, y_axe, y_axe_name, title="trash", num_show=num_show) # print(df_2) num_exper = [6] num_models = [5] file_names = [path + '5.txt'] df_4 = get_df(num_models, num_exper, file_names, model_names, y_axe, y_axe_name, title="trash", num_show=num_show) num_exper = [6] num_models = [5] file_names = [path + '9.txt'] df_8 = get_df(num_models, num_exper, file_names, model_names, y_axe, y_axe_name, title="trash", num_show=num_show) num_exper = [6] num_models = [5] file_names = [path + '17.txt'] df_16 = get_df(num_models, num_exper, file_names, model_names, y_axe, y_axe_name, title="trash", num_show=num_show) frames = [df_2, df_4, df_8, df_16] show_results(frames, ['d = 2', 'd = 4', 'd = 8', 'd = 16'], y_axe_name, y_log=False, x_log=True, dims=(24, 14), save=False, file_name='synthetic_datasets_2_2_final') ``` ## Supplementary ``` def show_results_dist_1_3(frames, title, y_axe_name, dims=(18, 12), save=False, file_name='trash', legend_size=13): size = len(frames) a4_dims = dims fig, axs = plt.subplots(1, 3, figsize=a4_dims) for i in range(3): sns.lineplot(x="Error = 1 - Recall@1", y=y_axe_name,hue="algorithm", markers=True, style="algorithm", dashes=False, data=frames[i], ax=axs[i], linewidth=2, ms=10) axs[i].set_title(title[i], size='20') lx = axs[i].get_xlabel() ly = axs[i].get_ylabel() axs[i].set_xlabel(lx, fontsize=20) axs[i].set_ylabel(ly, fontsize=20) axs[i].set_xscale('log') if i == 0: axs[i].set_xticks([0.001, 0.01, .1]) else: axs[i].set_xticks([0.01, 0.1]) axs[i].get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter()) axs[i].tick_params(axis='both', which='both', labelsize=15) if i > 0: axs[i].set_ylabel('') plt.setp(axs[i].get_legend().get_texts(), fontsize=legend_size) if save: fig.savefig(file_name + ".pdf", bbox_inches='tight') y_axe = 7 y_axe_name = "dist calc" model_names = [['kNN', 'kNN + Kl + llf 4', 'kNN + Kl + llf 8', 'kNN + Kl + llf 16', 'kNN + Kl + llf 32']] num_show = [0, 1, 2, 3, 4] num_exper = [6] num_models = [5] file_names = [path + '5_nlt.txt'] df_kl_4 = get_df(num_models, num_exper, file_names, model_names, y_axe, y_axe_name, title="trash", num_show=num_show) num_exper = [6] num_models = [5] file_names = [path + '9_nlt.txt'] df_kl_8 = get_df(num_models, num_exper, file_names, model_names, y_axe, y_axe_name, title="trash", num_show=num_show) num_exper = [6] num_models = [5] file_names = [path + '17_nlt.txt'] df_kl_16 = get_df(num_models, num_exper, file_names, model_names, y_axe, y_axe_name, title="trash", num_show=num_show) frames = [df_kl_4, df_kl_8, df_kl_16] show_results_dist_1_3(frames, ['d = 4', 'd = 8', 'd = 16'], y_axe_name, dims=(20, 6), save=False, file_name='suppl_figure_optimal_kl_number') path_end = '_llt.txt' y_axe = 7 y_axe_name = "dist calc" model_names = [['kNN', 'thrNN', 'kNN + Kl-dist + llf', 'kNN + Kl-rank + llf', 'kNN + Kl-rank sample + llf']] num_show = [0, 1, 2, 3, 4] num_exper = [6] num_models = [5] file_names = [path + '5' + path_end] df_kl_4 = 
get_df(num_models, num_exper, file_names, model_names, y_axe, y_axe_name, title="trash", num_show=num_show) num_exper = [6] num_models = [5] file_names = [path + '9' + path_end] df_kl_8 = get_df(num_models, num_exper, file_names, model_names, y_axe, y_axe_name, title="trash", num_show=num_show) num_exper = [6] num_models = [5] file_names = [path + '17' + path_end] df_kl_16 = get_df(num_models, num_exper, file_names, model_names, y_axe, y_axe_name, title="trash", num_show=num_show) frames = [df_kl_4, df_kl_8, df_kl_16] show_results_dist_1_3(frames, ['d = 4', 'd = 8', 'd = 16'], y_axe_name, dims=(20, 6), save=False, file_name='suppl_figure_optimal_kl_type', legend_size=10) path_start = "~/Desktop/results/distr_to_1_" path_end = ".txt" a4_dims = (7, 3) fig, ax = plt.subplots(figsize=a4_dims) ax.set_yticks([]) file_name = path_start + "sift" + path_end file_name = os.path.expanduser(file_name) distr = np.genfromtxt(file_name) sns.distplot(distr, label="SIFT") file_name = path_start + "d_9" + path_end file_name = os.path.expanduser(file_name) distr = np.genfromtxt(file_name) sns.distplot(distr, label="d=8") file_name = path_start + "d_17" + path_end file_name = os.path.expanduser(file_name) distr = np.genfromtxt(file_name) sns.distplot(distr, label="d=16") file_name = path_start + "d_33" + path_end file_name = os.path.expanduser(file_name) distr = np.genfromtxt(file_name) sns.distplot(distr, label="d=32") file_name = path_start + "d_65" + path_end file_name = os.path.expanduser(file_name) distr = np.genfromtxt(file_name) sns.distplot(distr, label="d=64") plt.legend() fig.savefig("suppl_dist_disrt.pdf", bbox_inches='tight') ```
github_jupyter
# Least-squares technique ## References - Statistics in geography: https://archive.org/details/statisticsingeog0000ebdo/ ## Imports ``` from functools import partial import numpy as np from scipy.stats import multivariate_normal, t import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from ipywidgets import interact, IntSlider inv = np.linalg.inv df = pd.read_csv('regression_data.csv') df.head(3) ``` ## Population 0.5 and 0.2 are NOT the population parameters. Although we used them to generate the population, the population parameters can be different from them. ``` def get_y(x): ys = x * 0.5 + 0.2 noises = 1 * np.random.normal(size=len(ys)) return ys + noises np.random.seed(52) xs = np.linspace(0, 10, 10000) ys = get_y(xs) np.random.seed(32) np.random.shuffle(xs) np.random.seed(32) np.random.shuffle(ys) plt.scatter(xs, ys, s=5) plt.show() ``` ## Design matrices ``` PHI = xs.reshape(-1, 1) PHI = np.hstack([ PHI, np.ones(PHI.shape) ]) T = ys.reshape(-1, 1) ``` ## Normal equation with regularization ``` def regularized_least_squares(PHI, T, regularizer=0): assert PHI.shape[0] == T.shape[0] pseudo_inv = inv(PHI.T @ PHI + np.eye(PHI.shape[1]) * regularizer) assert pseudo_inv.shape[0] == pseudo_inv.shape[1] W = pseudo_inv @ PHI.T @ T return {'slope' : float(W[0]), 'intercept' : float(W[1])} ``` ## Sampling distributions ### Population parameters ``` pop_params = regularized_least_squares(PHI, T) pop_slope, pop_intercept = pop_params['slope'], pop_params['intercept'] ``` ### Sample statistics Verify that the sampling distribution for both regression coefficients are normal. ``` n = 10 # sample size num_samps = 1000 def sample(PHI, T, n): idxs = np.random.randint(PHI.shape[0], size=n) return PHI[idxs], T[idxs] samp_slopes, samp_intercepts = [], [] for i in range(num_samps): PHI_samp, T_samp = sample(PHI, T, n) learned_param = regularized_least_squares(PHI_samp, T_samp) samp_slopes.append(learned_param['slope']); samp_intercepts.append(learned_param['intercept']) np.std(samp_slopes), np.std(samp_intercepts) fig = plt.figure(figsize=(12, 4)) fig.add_subplot(121) sns.kdeplot(samp_slopes) plt.title('Sample distribution of sample slopes') fig.add_subplot(122) sns.kdeplot(samp_intercepts) plt.title('Sample distribution of sample intercepts') plt.show() ``` Note that the two normal distributions above are correlated. This means that we need to be careful when plotting the 95% CI for the regression line, because we can't just plot the regression line with the highest slope and the highest intercept and the regression line with the lowest slope and the lowest intercept. ``` sns.jointplot(samp_slopes, samp_intercepts, s=5) plt.show() ``` ## Confidence interval **Caution.** The following computation of confidence intervals does not apply to regularized least squares. ### Sample one sample ``` n = 500 PHI_samp, T_samp = sample(PHI, T, n) ``` ### Compute sample statistics ``` learned_param = regularized_least_squares(PHI_samp, T_samp) samp_slope, samp_intercept = learned_param['slope'], learned_param['intercept'] samp_slope, samp_intercept ``` ### Compute standard errors of sample statistics Standard error is the estimate of the standard deviation of the sampling distribution. 
$$\hat\sigma = \sqrt{\frac{\text{Sum of all squared residuals}}{\text{Degrees of freedom}}}$$ Standard error for slope: $$\text{SE}(\hat\beta_1)=\hat\sigma \sqrt{\frac{1}{(n-1)s_X^2}}$$ Standard error for intercept: $$\text{SE}(\hat\beta_0)=\hat\sigma \sqrt{\frac{1}{n} + \frac{\bar X^2}{(n-1)s_X^2}}$$ where $\bar X$ is the sample mean of the $X$'s and $s_X^2$ is the sample variance of the $X$'s. ``` preds = samp_slope * PHI_samp[:,0] + samp_intercept sum_of_squared_residuals = np.sum((T_samp.reshape(-1) - preds) ** 2) samp_sigma_y_give_x = np.sqrt(sum_of_squared_residuals / (n - 2)) samp_sigma_y_give_x samp_mean = np.mean(PHI_samp[:,0]) samp_var = np.var(PHI_samp[:,0]) SE_slope = samp_sigma_y_give_x * np.sqrt(1 / ((n - 1) * samp_var)) SE_intercept = samp_sigma_y_give_x * np.sqrt(1 / n + samp_mean ** 2 / ((n - 1) * samp_var)) SE_slope, SE_intercept ``` ### Compute confidence intervals for sample statistics ``` slope_lower, slope_upper = samp_slope - 1.96 * SE_slope, samp_slope + 1.96 * SE_slope slope_lower, slope_upper intercept_lower, intercept_upper = samp_intercept - 1.96 * SE_intercept, samp_intercept + 1.96 * SE_intercept intercept_lower, intercept_upper ``` ### Compute confidence interval for regression line #### Boostrapped solution Use a 2-d Guassian to model the joint distribution between boostrapped sample slopes and boostrapped sample intercepts. **Fixed.** `samp_slopes` and `samp_intercepts` used in the cell below are not boostrapped; they are directly sampled from the population. Next time, add the boostrapped version. Using `samp_slopes` and `samp_intercepts` still has its value, though; it shows the population regression line lie right in the middle of all sample regression lines. Remember that, when ever you use bootstrapping to estimate the variance / covariance of the sample distribution of some statistic, there might be an equation that you can use from statistical theory. ``` num_resamples = 10000 resample_slopes, resample_intercepts = [], [] for i in range(num_resamples): PHI_resample, T_resample = sample(PHI_samp, T_samp, n=len(PHI_samp)) learned_params = regularized_least_squares(PHI_resample, T_resample) resample_slopes.append(learned_params['slope']); resample_intercepts.append(learned_params['intercept']) ``` **Fixed.** The following steps might improve the results, but I don't think they are part of the standard practice. ``` # means = [np.mean(resample_slopes), np.mean(resample_intercepts)] # cov = np.cov(resample_slopes, resample_intercepts) # model = multivariate_normal(mean=means, cov=cov) ``` Sample 5000 (slope, intercept) pairs from the Gaussian. ``` # num_pairs_sampled = 10000 # pairs = model.rvs(num_pairs_sampled) ``` Scatter samples, plot regression lines and CI. 
``` plt.figure(figsize=(20, 10)) plt.scatter(PHI_samp[:,0], T_samp.reshape(-1), s=20) # sample granularity = 1000 xs = np.linspace(0, 10, granularity) plt.plot(xs, samp_slope * xs + samp_intercept, label='Sample') # sample regression line plt.plot(xs, pop_slope * xs + pop_intercept, '--', color='black', label='Population') # population regression line lines = np.zeros((num_resamples, granularity)) for i, (slope, intercept) in enumerate(zip(resample_slopes, resample_intercepts)): lines[i] = slope * xs + intercept confidence_level = 95 uppers_95 = np.percentile(lines, confidence_level + (100 - confidence_level) / 2, axis=0) lowers_95 = np.percentile(lines, (100 - confidence_level) / 2, axis=0) confidence_level = 99 uppers_99 = np.percentile(lines, confidence_level + (100 - confidence_level) / 2, axis=0) lowers_99 = np.percentile(lines, (100 - confidence_level) / 2, axis=0) plt.fill_between(xs, lowers_95, uppers_95, color='grey', alpha=0.7, label='95% CI') plt.plot(xs, uppers_99, color='grey', label='99% CI') plt.plot(xs, lowers_99, color='grey') plt.legend() plt.show() ``` #### Analytic solution **Reference.** Page 97, Statistics of Geograph: A Practical Approach, David Ebdon, 1987. For a particular value $x_0$ of the independent variable $x$, its confidence interval is given by: $$\sqrt{\frac{\sum e^{2}}{n-2}\left[\frac{1}{n}+\frac{\left(x_{0}-\bar{x}\right)^{2}}{\sum x^{2}-n \bar{x}^{2}}\right]}$$ where - $\sum e^2$ is the sum of squares of residuals from regression, - $x$ is the independent variables, - $\bar{x}$ is the sample mean of the independent variables. ``` sum_of_squared_xs = np.sum(PHI_samp[:,0] ** 2) SEs = np.sqrt( (sum_of_squared_residuals / (n - 2)) * (1 / n + (xs - samp_mean) ** 2 / (sum_of_squared_xs - n * samp_mean ** 2)) ) t_97dot5 = t.ppf(0.975, df=n-2) t_99dot5 = t.ppf(0.995, df=n-2) yhats = samp_slope * xs + samp_intercept uppers_95 = yhats + t_97dot5 * SEs lowers_95 = yhats - t_97dot5 * SEs uppers_99 = yhats + t_99dot5 * SEs lowers_99 = yhats - t_99dot5 * SEs plt.figure(figsize=(20, 10)) plt.scatter(PHI_samp[:,0], T_samp.reshape(-1), s=20) # sample granularity = 1000 xs = np.linspace(0, 10, granularity) plt.plot(xs, samp_slope * xs + samp_intercept, label='Sample') # sample regression line plt.plot(xs, pop_slope * xs + pop_intercept, '--', color='black', label='Population') # population regression line plt.fill_between(xs, lowers_95, uppers_95, color='grey', alpha=0.7, label='95% CI') plt.plot(xs, uppers_99, color='grey', label='99% CI') plt.plot(xs, lowers_99, color='grey') plt.legend() plt.show() ``` ## Regularized least squares ``` def plot_regression_line(PHI, T, regularizer): plt.scatter(PHI[:,0], T, s=5) params = regularized_least_squares(PHI, T, regularizer) x_min, x_max = PHI[:,0].min(), PHI[:,0].max() xs = np.linspace(x_min, x_max, 2) ys = params['slope'] * xs + params['intercept'] plt.plot(xs, ys, color='orange') plt.ylim(-3, 10) plt.show() plot_regression_line(PHI, T, regularizer=20) def plot_regression_line_wrapper(regularizer, num_points): plot_regression_line(PHI[:num_points], T[:num_points], regularizer) ``` Yes! The effect of regularization does change with the size of the dataset. ``` _ = interact( plot_regression_line_wrapper, regularizer=IntSlider(min=0, max=10000, value=5000, continuous_update=False), num_points=IntSlider(min=2, max=1000, value=1000, continuous_update=False) ) ```
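As a sanity check on the closed-form solution above (a sketch of my own, assuming the `PHI`, `T`, and `regularized_least_squares` objects already defined in this notebook), the same coefficients can be recovered with scikit-learn's `Ridge`, since `PHI` already contains a bias column:

```
from sklearn.linear_model import Ridge

lam = 10.0
ours = regularized_least_squares(PHI, T, regularizer=lam)

# fit_intercept=False so that the bias column in PHI plays the role of the intercept
ridge = Ridge(alpha=lam, fit_intercept=False).fit(PHI, T)

print(ours['slope'], ours['intercept'])
print(ridge.coef_.ravel())   # should closely match the two numbers above
```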
github_jupyter
# Callin Switzer ## Modifications to TLD code for ODE system ___ ``` from matplotlib import pyplot as plt %matplotlib inline from matplotlib import cm import numpy as np import os import scipy.io import seaborn as sb import matplotlib.pylab as pylab # forces plots to appear in the ipython notebook %matplotlib inline from scipy.integrate import odeint from pylab import plot,xlabel,ylabel,title,legend,figure,subplots import random import time from pylab import cos, pi, arange, sqrt, pi, array import sys sb.__version__ sys.executable sys.version def FlyTheBug(state,t): # unpack the state vector x,xd,y,yd,theta,thetad,phi,phid = state # displacement,x and velocity xd etc... You got it?' # compute acceleration xdd = x'' # Jorge's order . x,y,theta,phi,xd,yd,thetad,phid # . there is no entry for Q(2) ... which would be y. I wonder why not? #Reynolds number calculation: Re_head = rhoA*(np.sqrt((xd**2)+(yd**2)))*(2*bhead)/muA; #dimensionless number Re_butt = rhoA*(np.sqrt((xd**2)+(yd**2)))*(2*bbutt)/muA; #dimensionless number #Coefficient of drag stuff: Cd_head = 24/np.abs(Re_head) + 6/(1 + np.sqrt(np.abs(Re_head))) + 0.4; Cd_butt = 24/np.abs(Re_butt) + 6/(1 + np.sqrt(np.abs(Re_butt))) + 0.4; h1 = m1 + m2; h2 = (-1)*L1*m1*np.sin(theta); h3 = (-1)*L2*m2*np.sin(phi); h4 = L1*m1*np.cos(theta); h5 = L2*m2*np.cos(phi); h6 = (-1)*F*np.cos(alpha+theta)+(1/2)*Cd_butt*rhoA*S_butt*np.abs(xd)*xd+(1/2)*Cd_head*rhoA*S_head*np.abs(xd)*xd+(-1)*L1*m1*np.cos(theta)*thetad**2+(-1)*L2*m2*np.cos(phi)*phid**2 h7 = g*(m1+m2)+(1/2)*Cd_butt*rhoA*S_butt*np.abs(yd)*yd+(1/2)*Cd_head*rhoA*S_head*np.abs(yd)*yd+(-1)*L1*m1*thetad**2*np.sin(theta)+(-1)*F*np.sin(alpha+theta)+(-1)*L2*m2*phid**2*np.sin(phi); h8 = (-1)*tau0+g*L1*m1*np.cos(theta)+(-1)*K*((-1)*betaR+(-1)*pi+(-1)*theta+phi)+(-1)*c*((-1)*thetad+phid)+(-1)*F*L3*np.sin(alpha); h9 = tau0+g*L2*m2*np.cos(phi)+K*((-1)*betaR+(-1)*pi+(-1)*theta+phi)+c*((-1)*thetad+phid); h10 = I1+L1**2*m1 h11 = I2+L2**2*m2 xdd = (-1)*(h10*h11*h1**2+(-1)*h11*h1*h2**2+(-1)*h10*h1*h3**2+(-1)*h11*h1*h4**2+h3**2*h4**2+(-2)*h2* h3*h4*h5+(-1)*h10*h1*h5**2+h2**2*h5**2)**(-1)*( h10*h11*h1*h6+(-1)*h11*h4**2*h6+(-1)*h10*h5**2* h6+h11*h2*h4*h7+h10*h3*h5*h7+(-1)*h11*h1*h2* h8+(-1)*h3*h4*h5*h8+h2*h5**2*h8+(-1)*h10*h1* h3*h9+h3*h4**2*h9+(-1)*h2*h4*h5*h9) ydd = (-1)*((-1)*h10*h11*h1**2+h11*h1*h2**2+h10*h1* h3**2+h11*h1*h4**2+(-1)*h3**2*h4**2+2*h2*h3*h4* h5+h10*h1*h5**2+(-1)*h2**2*h5**2)**(-1)*((-1)*h11* h2*h4*h6+(-1)*h10*h3*h5*h6+(-1)*h10*h11*h1* h7+h11*h2**2*h7+h10*h3**2*h7+h11*h1*h4*h8+(-1)* h3**2*h4*h8+h2*h3*h5*h8+h2*h3*h4*h9+h10*h1* h5*h9+(-1)*h2**2*h5*h9) thetadd = (-1)*((-1)*h10*h11*h1**2+h11*h1*h2**2+h10*h1* h3**2+h11*h1*h4**2+(-1)*h3**2*h4**2+2*h2*h3*h4* h5+h10*h1*h5**2+(-1)*h2**2*h5**2)**(-1)*(h11*h1* h2*h6+h3*h4*h5*h6+(-1)*h2*h5**2*h6+h11*h1* h4*h7+(-1)*h3**2*h4*h7+h2*h3*h5*h7+(-1)*h11* h1**2*h8+h1*h3**2*h8+h1*h5**2*h8+(-1)*h1*h2* h3*h9+(-1)*h1*h4*h5*h9); phidd = (-1)*((-1)*h10*h11*h1**2+h11*h1*h2**2+h10*h1* h3**2+h11*h1*h4**2+(-1)*h3**2*h4**2+2*h2*h3*h4* h5+h10*h1*h5**2+(-1)*h2**2*h5**2)**(-1)*(h10*h1* h3*h6+(-1)*h3*h4**2*h6+h2*h4*h5*h6+h2*h3*h4* h7+h10*h1*h5*h7+(-1)*h2**2*h5*h7+(-1)*h1*h2* h3*h8+(-1)*h1*h4*h5*h8+(-1)*h10*h1**2*h9+h1* h2**2*h9+h1*h4**2*h9) return [xd, xdd,yd,ydd,thetad,thetadd,phid,phidd] # Bunches of parameters ... these don't vary from run to run #masses and moment of inertias in terms of insect density and eccentricity #of the head/thorax & gaster # oh.. and I'm offline -- so I just made up a bunch of numbers. 
bhead = 0.507 ahead = 0.908 bbutt = 0.1295 abutt = 1.7475 rho = 1 #cgs density of insect rhoA = 0.00118 #cgs density of air muA = 0.000186 #cgs viscosity L1 = 0.908 #Length from the thorax-abdomen joint to the center of the #head-thorax mass in cm L2 = 1.7475 #Length from the thorax-abdomen joint to the center of the #abdomen mass in cm L3 = 0.75 #Length from the thorax-abdomen joint to the aerodynamic force #vector in cm m1 = rho*(4/3)*pi*(bhead**2)*ahead; #m1 is the mass of the head-thorax m2 = rho*(4/3)*pi*(bbutt**2)*abutt; #m2 is the mass of the abdomen #(petiole + gaster) echead = ahead/bhead; #Eccentricity of head-thorax (unitless) ecbutt = abutt/bbutt; #Eccentricity of gaster (unitless) I1 = (1/5)*m1*(bhead**2)*(1 + echead**2); #Moment of inertia of the #head-thorax I2 = (1/5)*m2*(bbutt**2)*(1 + ecbutt**2); #Moment of inertia of the gaster S_head = pi*bhead**2; #This is the surface area of the object experiencing drag. #In this case, it is modeled as a sphere. S_butt = pi*bbutt**2; #This is the surface area of the object experiencing drag. #In this case, it is modeled as a sphere. K = 29.3 #K is the torsional spring constant of the thorax-petiole joint #in (cm^2)*g/(rad*(s^2)) c = 14075.8 #c is the torsional damping constant of the thorax-petiole joint #in (cm^2)*g/s g = 980.0 #g is the acceleration due to gravity in cm/(s^2) betaR = 0.0 #This is the resting configuration of our #torsional spring(s) = Initial abdomen angle - initial head angle - pi #This cell just checks to be sure we can run this puppy and graph results. state0 = [0.0, 0.0001, 0.0, 0.0001, np.pi/4, 0.0, np.pi/4 + np.pi, 0.0] #initial conditions [x0 , v0 etc0 ] F = 0 # . CAUTION .. .I just set this to zero. # By the way --if you give this an initial kick and keep the force low, it has a nice parabolic trajectory alpha = 5.75 tau0 = 100. # ti = 0.0 # initial time # tf = 8 # final time # nstep = 1000 # t = np.linspace(0, tf, num = nstep, endpoint = True) tf = 1.0 # final time nstep = 1000 step = (tf-ti)/nstep # step t = arange(ti, tf, step) print(t.shape) state = odeint(FlyTheBug, state0, t) x = array(state[:,[0]]) xd = array(state[:,[1]]) y = array(state[:,[2]]) yd = array(state[:,[3]]) theta = array(state[:,[4]]) thetad = array(state[:,[5]]) phi = array(state[:,[6]]) phid = array(state[:,[7]]) # And let's just plot it all sb.set() print(x[-1:], y[-1:]) x100 = [x[-1:], y[-1:]] plt.figure() plt.plot(t,xd, label = 'Ux vs time') plt.plot(t,yd, label = 'Uy vs time') plt.legend() plt.figure() plt.plot(t,theta, label = 'theta vs time') plt.legend() plt.show() plt.plot(t,theta-phi - np.pi, label = 'theta vs time') plt.figure() plt.plot(x,y, label = 'x vs y') plt.legend() #This cell just checks to be sure we can run this puppy and graph results. state0 = [0.0, 0.0001, 0.0, 0.0001, np.pi/4, 0.0, np.pi/4 + np.pi, 0.0] #initial conditions [x0 , v0 etc0 ] F = 40462.5 # . CAUTION .. .I just set this to zero. # By the way --if you give this an initial kick and keep the force low, it has a nice parabolic trajectory alpha = 5.75 # tau0 = 69825. 
# ti = 0.0 # initial time # tf = 0.02 # final time # nstep = 2 # t = np.linspace(0, tf, num = nstep, endpoint = True) tf = 1.0 # final time nstep = 1000 step = (tf-ti)/nstep # step t = arange(ti, tf, step) print(t.shape) state = odeint(FlyTheBug, state0, t) x = array(state[:,[0]]) xd = array(state[:,[1]]) y = array(state[:,[2]]) yd = array(state[:,[3]]) theta = array(state[:,[4]]) thetad = array(state[:,[5]]) phi = array(state[:,[6]]) phid = array(state[:,[7]]) # And let's just plot it all sb.set() print(x[-1:], y[-1:]) plt.figure() plt.plot(t,xd, label = 'Ux vs time') plt.plot(t,yd, label = 'Uy vs time') plt.legend() plt.figure() plt.plot(t,theta, label = 'theta vs time') plt.legend() plt.figure() plt.plot(x,y, label = 'x vs y') plt.legend() plt.show() x100 - np.array([x[-1:], y[-1:]]) print(x[99]) print(y[99]) print(theta[99]) # This cell just tests the random assignmnent of forces and plots the result in the next cell tic = time.time() ti = 0.0 # initial time tf = 0.02 # final time nstep = 100 # number of time steps. step = (tf-ti)/nstep # duration of the time step t = arange(ti, tf, step) # how much time nrun = 100 #number of trajectories. x = [[0 for x in range(nrun)] for y in range(nstep)] # initialize the matrix of locations xd = [[0 for x in range(nrun)] for y in range(nstep)] y = [[0 for x in range(nrun)] for y in range(nstep)] yd = [[0 for x in range(nrun)] for y in range(nstep)] theta = [[0 for x in range(nrun)] for y in range(nstep)] thetad = [[0 for x in range(nrun)] for y in range(nstep)] phi = [[0 for x in range(nrun)] for y in range(nstep)] phid = [[0 for x in range(nrun)] for y in range(nstep)] state0 = [0.0, 0.1, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0] #initial conditions [x0 , v0 etc0 ] for i in range(0,nrun): r = random.random()-0.5 # random number between -0.5 and 0.5 F = r*100000 # By the way --if you give this an initial kick and keep the force low, it has a nice parabolic trajectory r = random.random()-0.5 alpha = r*np.pi r = random.random()-0.5 tau0 = r*100 state = odeint(FlyTheBug, state0, t) x[i][:] = array(state[:,[0]]) xd[i][:] = array(state[:,[1]]) y[i][:] = array(state[:,[2]]) yd[i][:] = array(state[:,[3]]) theta[i][:] = array(state[:,[4]]) thetad[i][:] = array(state[:,[5]]) phi[i][:] = array(state[:,[6]]) phid[i][:] = array(state[:,[7]]) print('elapsed time = ',time.time()-tic) plt.figure() for i in range(0,nrun): plt.plot(x[i][:],y[i][:], label = 'trajectory x vs y') # There are two forks in the road # One is to select myriad random ICs and and myriad random Forces/ Torques.. then learn. # The other fork generates a tracking beahvior using MPC with MC. In the latter, we want to specify a trajectory print(x[:][nstep-1]) #%Weighting coefficients from Jorge ... hope they're the recent ones. #%c1 = xdot, c2 = ydot, c3 = thetadot, c4 = x, c5 = y, c6 = theta #c1 = 1*10^-5; c2 = 1*10^-5; c3 = 10^6; c4 = 10^7; c5 = 10^8; c6 = 10^10; CostWeights = [10**7,10**-5,10**8,10**-5,10^10,10^6,0,0] #EndState = [x[:][nstep-1],xd[:][nstep-1]],y[:][nstep-1],yd[:][nstep-1],theta[:][nstep-1],thetad[:][nstep-1],phi[:][nstep-1],phid[:][nstep-1] Goal = [0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01] print(np.dot(CostWeights,np.abs(EndState - Goal))) import multiprocessing multiprocessing.cpu_count() ```
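The cost evaluation in the cell above will not run as written: `EndState` is only defined in a comment, and `10^10` / `10^6` are bitwise XOR in Python rather than powers. A minimal sketch of the intended end-state cost, evaluated for each random trajectory from the Monte Carlo cell, might look like the following (the weight ordering copies the `CostWeights` list above and is an assumption; the trajectory lists `x, xd, ..., phid` are the ones filled in that cell).

```
# Hedged sketch: weighted L1 distance between each trajectory's end state and the goal.
cost_weights = np.array([10**7, 10**-5, 10**8, 10**-5, 10**10, 10**6, 0, 0])
goal = np.array([0.01] * 8)

costs = []
for i in range(nrun):
    end_state = np.array([x[i][-1], xd[i][-1], y[i][-1], yd[i][-1],
                          theta[i][-1], thetad[i][-1],
                          phi[i][-1], phid[i][-1]], dtype=float).ravel()
    costs.append(np.dot(cost_weights, np.abs(end_state - goal)))

best = int(np.argmin(costs))
print('lowest-cost trajectory:', best, 'cost =', costs[best])
```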
``` from IPython.display import display, HTML from pyspark.sql import SparkSession from pyspark import StorageLevel import pandas as pd from pyspark.sql.types import StructType, StructField,StringType, LongType, IntegerType, DoubleType, ArrayType from pyspark.sql.functions import regexp_replace from sedona.register import SedonaRegistrator from sedona.utils import SedonaKryoRegistrator, KryoSerializer from pyspark.sql.functions import col, split, expr from pyspark.sql.functions import udf, lit from sedona.utils import SedonaKryoRegistrator, KryoSerializer from pyspark.sql.functions import col, split, expr from pyspark.sql.functions import udf, lit ``` # Create Spark Session for application ``` spark = SparkSession.\ builder.\ master("local[*]").\ appName("Demo-app").\ config("spark.serializer", KryoSerializer.getName).\ config("spark.kryo.registrator", SedonaKryoRegistrator.getName) .\ config("spark.jars.packages", "org.apache.sedona:sedona-python-adapter-3.0_2.12:1.1.0-incubating,org.datasyslab:geotools-wrapper:1.1.0-25.2") .\ getOrCreate() SedonaRegistrator.registerAll(spark) sc = spark.sparkContext ``` # Geotiff Loader 1. Loader takes as input a path to directory which contains geotiff files or a parth to particular geotiff file 2. Loader will read geotiff image in a struct named image which contains multiple fields as shown in the schema below which can be extracted using spark SQL ``` # Path to directory of geotiff images DATA_DIR = "./data/raster/" df = spark.read.format("geotiff").option("dropInvalid",True).load(DATA_DIR) df.printSchema() df = df.selectExpr("image.origin as origin","ST_GeomFromWkt(image.wkt) as Geom", "image.height as height", "image.width as width", "image.data as data", "image.nBands as bands") df.show(5) ``` # Extract a particular band from geotiff dataframe using RS_GetBand() ``` ''' RS_GetBand() will fetch a particular band from given data array which is the concatination of all the bands''' df = df.selectExpr("Geom","RS_GetBand(data, 1,bands) as Band1","RS_GetBand(data, 2,bands) as Band2","RS_GetBand(data, 3,bands) as Band3", "RS_GetBand(data, 4,bands) as Band4") df.createOrReplaceTempView("allbands") df.show(5) ``` # Map Algebra operations on band values ``` ''' RS_NormalizedDifference can be used to calculate NDVI for a particular geotiff image since it uses same computational formula as ndvi''' NomalizedDifference = df.selectExpr("RS_NormalizedDifference(Band1, Band2) as normDiff") NomalizedDifference.show(5) ''' RS_Mean() can used to calculate mean of piel values in a particular spatial band ''' meanDF = df.selectExpr("RS_Mean(Band1) as mean") meanDF.show(5) """ RS_Mode() is used to calculate mode in an array of pixels and returns a array of double with size 1 in case of unique mode""" modeDF = df.selectExpr("RS_Mode(Band1) as mode") modeDF.show(5) ''' RS_GreaterThan() is used to mask all the values with 1 which are greater than a particular threshold''' greaterthanDF = spark.sql("Select RS_GreaterThan(Band1,1000.0) as greaterthan from allbands") greaterthanDF.show() ''' RS_GreaterThanEqual() is used to mask all the values with 1 which are greater than a particular threshold''' greaterthanEqualDF = spark.sql("Select RS_GreaterThanEqual(Band1,360.0) as greaterthanEqual from allbands") greaterthanEqualDF.show() ''' RS_LessThan() is used to mask all the values with 1 which are less than a particular threshold''' lessthanDF = spark.sql("Select RS_LessThan(Band1,1000.0) as lessthan from allbands") lessthanDF.show() ''' RS_LessThanEqual() is used to mask all the 
values with 1 which are less than equal to a particular threshold''' lessthanEqualDF = spark.sql("Select RS_LessThanEqual(Band1,2890.0) as lessthanequal from allbands") lessthanEqualDF.show() ''' RS_AddBands() can add two spatial bands together''' sumDF = df.selectExpr("RS_AddBands(Band1, Band2) as sumOfBand") sumDF.show(5) ''' RS_SubtractBands() can subtract two spatial bands together''' subtractDF = df.selectExpr("RS_SubtractBands(Band1, Band2) as diffOfBand") subtractDF.show(5) ''' RS_MultiplyBands() can multiple two bands together''' multiplyDF = df.selectExpr("RS_MultiplyBands(Band1, Band2) as productOfBand") multiplyDF.show(5) ''' RS_DivideBands() can divide two bands together''' divideDF = df.selectExpr("RS_DivideBands(Band1, Band2) as divisionOfBand") divideDF.show(5) ''' RS_MultiplyFactor() will multiply a factor to a spatial band''' mulfacDF = df.selectExpr("RS_MultiplyFactor(Band2, 2) as target") mulfacDF.show(5) ''' RS_BitwiseAND() will return AND between two values of Bands''' bitwiseAND = df.selectExpr("RS_BitwiseAND(Band1, Band2) as AND") bitwiseAND.show(5) ''' RS_BitwiseOR() will return OR between two values of Bands''' bitwiseOR = df.selectExpr("RS_BitwiseOR(Band1, Band2) as OR") bitwiseOR.show(5) ''' RS_Count() will calculate the total number of occurence of a target value''' countDF = df.selectExpr("RS_Count(RS_GreaterThan(Band1,1000.0), 1.0) as count") countDF.show(5) ''' RS_Modulo() will calculate the modulus of band value with respect to a given number''' moduloDF = df.selectExpr("RS_Modulo(Band1, 21.0) as modulo ") moduloDF.show(5) ''' RS_SquareRoot() will calculate calculate square root of all the band values upto two decimal places''' rootDF = df.selectExpr("RS_SquareRoot(Band1) as root") rootDF.show(5) ''' RS_LogicalDifference() will return value from band1 if value at that particular location is not equal tp band1 else it will return 0''' logDiff = df.selectExpr("RS_LogicalDifference(Band1, Band2) as loggDifference") logDiff.show(5) ''' RS_LogicalOver() will iterate over two bands and return value of first band if it is not equal to 0 else it will return value from later band''' logOver = df.selectExpr("RS_LogicalOver(Band3, Band2) as logicalOver") logOver.show(5) ``` # Visualising Geotiff Images 1. Normalize the bands in range [0-255] if values are greater than 255 2. Process image using RS_Base64() which converts in into a base64 string 3. Embedd results of RS_Base64() in RS_HTML() to embedd into IPython notebook 4. 
Process results of RS_HTML() as below: ``` ''' Plotting images as a dataframe using geotiff Dataframe.''' df = spark.read.format("geotiff").option("dropInvalid",True).load(DATA_DIR) df = df.selectExpr("image.origin as origin","ST_GeomFromWkt(image.wkt) as Geom", "image.height as height", "image.width as width", "image.data as data", "image.nBands as bands") df = df.selectExpr("RS_GetBand(data,1,bands) as targetband", "height", "width", "bands", "Geom") df_base64 = df.selectExpr("Geom", "RS_Base64(height,width,RS_Normalize(targetBand), RS_Array(height*width,0.0), RS_Array(height*width, 0.0)) as red","RS_Base64(height,width,RS_Array(height*width, 0.0), RS_Normalize(targetBand), RS_Array(height*width, 0.0)) as green", "RS_Base64(height,width,RS_Array(height*width, 0.0), RS_Array(height*width, 0.0), RS_Normalize(targetBand)) as blue","RS_Base64(height,width,RS_Normalize(targetBand), RS_Normalize(targetBand),RS_Normalize(targetBand)) as RGB" ) df_HTML = df_base64.selectExpr("Geom","RS_HTML(red) as RedBand","RS_HTML(blue) as BlueBand","RS_HTML(green) as GreenBand", "RS_HTML(RGB) as CombinedBand") df_HTML.show(5) display(HTML(df_HTML.limit(2).toPandas().to_html(escape=False))) ``` # User can also create some UDF manually to manipulate Geotiff dataframes ``` ''' Sample UDF calculates sum of all the values in a band which are greater than 1000.0 ''' def SumOfValues(band): total = 0.0 for num in band: if num>1000.0: total+=1 return total calculateSum = udf(SumOfValues, DoubleType()) spark.udf.register("RS_Sum", calculateSum) sumDF = df.selectExpr("RS_Sum(targetband) as sum") sumDF.show() ''' Sample UDF to visualize a particular region of a GeoTiff image''' def generatemask(band, width,height): for (i,val) in enumerate(band): if (i%width>=12 and i%width<26) and (i%height>=12 and i%height<26): band[i] = 255.0 else: band[i] = 0.0 return band maskValues = udf(generatemask, ArrayType(DoubleType())) spark.udf.register("RS_MaskValues", maskValues) df_base64 = df.selectExpr("Geom", "RS_Base64(height,width,RS_Normalize(targetband), RS_Array(height*width,0.0), RS_Array(height*width, 0.0), RS_MaskValues(targetband,width,height)) as region" ) df_HTML = df_base64.selectExpr("Geom","RS_HTML(region) as selectedregion") display(HTML(df_HTML.limit(2).toPandas().to_html(escape=False))) ```
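One more illustration of the same UDF pattern (a hedged sketch, reusing the `targetband` column and the `udf`/`selectExpr` machinery from the cells above): a value-based mask that parallels the position-based `generatemask()`.

```
''' Sample UDF that builds a binary mask from pixel values: anything above a
threshold becomes 255.0, everything else 0.0 (value-based counterpart of
generatemask() above). Illustrative sketch only. '''
def thresholdmask(band, threshold):
    return [255.0 if val > threshold else 0.0 for val in band]

maskByValue = udf(thresholdmask, ArrayType(DoubleType()))
spark.udf.register("RS_ThresholdMask", maskByValue)

df_threshold = df.selectExpr("Geom", "RS_ThresholdMask(targetband, 1000.0) as thresholded")
df_threshold.show(5)
```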
# Stepper Motors * [How to use a stepper motor with the Raspberry Pi Pico](https://www.youngwonks.com/blog/How-to-use-a-stepper-motor-with-the-Raspberry-Pi-Pico) * [Control 28BYJ-48 Stepper Motor with ULN2003 Driver & Arduino](https://lastminuteengineers.com/28byj48-stepper-motor-arduino-tutorial/) Description of the 27BYJ-48 stepper motor, ULN2003 driver, and Arduino code. * [28BYJ-48 stepper motor and ULN2003 Arduino (Quick tutorial for beginners)](https://www.youtube.com/watch?v=avrdDZD7qEQ) Video description. * [Stepper Motor - Wikipedia](https://en.wikipedia.org/wiki/Stepper_motor) <img src="https://upload.wikimedia.org/wikipedia/commons/6/66/28BYJ-48_unipolar_stepper_motor_with_ULN2003_driver.jpg" alt="28BYJ-48 unipolar stepper motor with ULN2003 driver.jpg" height="480" width="640"> <a href="https://commons.wikimedia.org/w/index.php?curid=83551720">Link</a> ## Stepper Motors ![](https://cdn-learn.adafruit.com/assets/assets/000/016/234/original/components_IMG_4810_crop.jpg?1398735192) [Adafruit](https://learn.adafruit.com/all-about-stepper-motors/types-of-steppers) ![](https://cdn-learn.adafruit.com/assets/assets/000/016/342/original/components_IMG_4837.jpg?1399130432) [Adafruit](https://learn.adafruit.com/all-about-stepper-motors/types-of-steppers) ![](https://cdn-learn.adafruit.com/assets/assets/000/016/343/large1024/components_winding_types_2.png?1399130808) ## Unipolar Stepper Motors The ubiquitous 28BYJ-48 stepper motor with reduction gears that is manufactured by the millions and widely available at very low cost. [Elegoo, for example, sells kits of 5 motors with ULN2003 5V driver boards](https://www.elegoo.com/products/elegoo-uln2003-5v-stepper-motor-uln2003-driver-board) for less than $15/kit. The [UNL2003](https://en.wikipedia.org/wiki/ULN2003A) is a package of seven NPN Darlington transistors capable of 500ma output at 50 volts, with flyback diodes to drive inductive loads. ![](https://cdn-learn.adafruit.com/assets/assets/000/016/349/medium640/components_unipolar_5.png?1399131989) ![](https://m.media-amazon.com/images/S/aplus-seller-content-images-us-east-1/ATVPDKIKX0DER/A2WWHQ25ENKVJ1/B01CP18J4A/cZgPvVZSJSP._UX970_TTW__.jpg) The 28BJY-48 has 32 teeth thus each full step corresponds to 360/32 = 11.25 degrees of rotation. A set of four reduction gears yields a 63.68395:1 gear reduction, or 2037.8864 steps per rotation. The maximum speed is 500 steps per second. If half steps are used, then there are 4075.7728 half steps per revolution at a maximum speed of 1000 half steps per second. (See https://youtu.be/15K9N1yVnhc for a teardown of the 28BYJ-48 motor.) ## Driving the 28BYJ-48 Stepper Motor (Also see https://www.youtube.com/watch?v=UJ4JjeCLuaI&ab_channel=TinkerTechTrove) The following code assigns four GPIO pins to the four coils. For this code, the pins don't need to be contiguous or in order, but keeping that discipline may help later when we attempt to implement a driver using the PIO state machines of the Raspberry Pi Pico. Note that the Stepper class maintains an internal parameter corresponding to the current rotor position. This is used to index into the sequence data using modular arithmetic. See []() for ideas on a Stepper class. 
``` %serialconnect from machine import Pin import time class Stepper(object): step_seq = [[1, 0, 0, 0], [1, 1, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 1, 1], [0, 0, 0, 1], [1, 0, 0, 1]] def __init__(self, gpio_pins): self.pins = [Pin(pin, Pin.OUT) for pin in gpio_pins] self.motor_position = 0 def rotate(self, degrees=360): n_steps = abs(int(4075.7728*degrees/360)) d = 1 if degrees > 0 else -1 for _ in range(n_steps): self.motor_position += d phase = self.motor_position % len(self.step_seq) for i, value in enumerate(self.step_seq[phase]): self.pins[i].value(value) time.sleep(0.001) stepper = Stepper([2, 3, 4, 5]) stepper.rotate(360) stepper.rotate(-360) print(stepper.motor_position) ``` Discussion: * What class methods should we build to support the syringe pump project? * Should we simplify and stick with half-step sequence? * How will be integrate motor operation with UI buttons and other controls? ## Programmable Input/Ouput (PIO) * MicroPython (https://datasheets.raspberrypi.org/pico/raspberry-pi-pico-python-sdk.pdf) * TinkerTechTrove [[github]](https://github.com/tinkertechtrove/pico-pi-playinghttps://github.com/tinkertechtrove/pico-pi-playing) [[youtube]](https://www.youtube.com/channel/UCnoBIijHK7NnCBVpUojYFTA/videoshttps://www.youtube.com/channel/UCnoBIijHK7NnCBVpUojYFTA/videos) * [Raspberry Pi Pico PIO - Ep. 1 - Overview with Pull, Out, and Parallel Port](https://youtu.be/YafifJLNr6I) ``` %serialconnect from machine import Pin from rp2 import PIO, StateMachine, asm_pio from time import sleep import sys @asm_pio(set_init=(PIO.OUT_LOW,) * 4) def prog(): wrap_target() set(pins, 8) [31] # 8 nop() [31] nop() [31] nop() [31] nop() [31] nop() [31] nop() [31] set(pins, 4) [31] # 4 nop() [31] nop() [31] nop() [31] nop() [31] nop() [31] nop() [31] set(pins, 2) [31] # 2 nop() [31] nop() [31] nop() [31] nop() [31] nop() [31] nop() [31] set(pins, 1) [31] # 1 nop() [31] nop() [31] nop() [31] nop() [31] nop() [31] nop() [31] wrap() sm = StateMachine(0, prog, freq=100000, set_base=Pin(14)) sm.active(1) sleep(10) sm.active(0) sm.exec("set(pins,0)") %serialconnect from machine import Pin from rp2 import PIO, StateMachine, asm_pio from time import sleep import sys @asm_pio(set_init=(PIO.OUT_LOW,) * 4, out_init=(PIO.OUT_HIGH,) * 4, out_shiftdir=PIO.SHIFT_LEFT) def prog(): pull() mov(y, osr) # step pattern pull() mov(x, osr) # num steps jmp(not_x, "end") label("loop") jmp(not_osre, "step") # loop pattern if exhausted mov(osr, y) label("step") out(pins, 4) [31] nop() [31] nop() [31] nop() [31] jmp(x_dec,"loop") label("end") set(pins, 8) [31] # 8 sm = StateMachine(0, prog, freq=10000, set_base=Pin(14), out_base=Pin(14)) sm.active(1) sm.put(2216789025) #1000 0100 0010 0001 1000010000100001 sm.put(1000) sleep(10) sm.active(0) sm.exec("set(pins,0)") %serialconnect from machine import Pin from rp2 import PIO, StateMachine, asm_pio from time import sleep import sys @asm_pio(set_init=(PIO.OUT_LOW,) * 4, out_init=(PIO.OUT_LOW,) * 4, out_shiftdir=PIO.SHIFT_RIGHT, in_shiftdir=PIO.SHIFT_LEFT) def prog(): pull() mov(x, osr) # num steps pull() mov(y, osr) # step pattern jmp(not_x, "end") label("loop") jmp(not_osre, "step") # loop pattern if exhausted mov(osr, y) label("step") out(pins, 4) [31] jmp(x_dec,"loop") label("end") irq(rel(0)) sm = StateMachine(0, prog, freq=10000, set_base=Pin(14), out_base=Pin(14)) data = [(1,2,4,8),(2,4,8,1),(4,8,1,2),(8,1,2,4)] steps = 0 def turn(sm): global steps global data idx = steps % 4 a = data[idx][0] | (data[idx][1] << 4) | (data[idx][2] << 8) | (data[idx][3] << 
12) a = a << 16 | a #print("{0:b}".format(a)) sleep(1) sm.put(500) sm.put(a) steps += 500 sm.irq(turn) sm.active(1) turn(sm) sleep(50) print("done") sm.active(0) sm.exec("set(pins,0)") %serialconnect import time import rp2 @rp2.asm_pio() def irq_test(): wrap_target() nop() [31] nop() [31] nop() [31] nop() [31] irq(0) nop() [31] nop() [31] nop() [31] nop() [31] irq(1) wrap() rp2.PIO(0).irq(lambda pio: print(pio.irq().flags())) #rp2.PIO(1).irq(lambda pio: print("1")) sm = rp2.StateMachine(0, irq_test, freq=2000) sm1 = rp2.StateMachine(1, irq_test, freq=2000) sm.active(1) #sm1.active(1) time.sleep(1) sm.active(0) sm1.active(0) ```
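The constant `2216789025` passed to `sm.put()` in the second program is just the wave-drive pattern 8-4-2-1 packed twice into one 32-bit word. A small plain-Python helper (a sketch, not part of the original notebook) makes the packing explicit and also fits an 8-state half-step sequence:

```
def pack_pattern(nibbles):
    """Pack up to eight 4-bit coil patterns into one 32-bit word.
    With out_shiftdir=PIO.SHIFT_LEFT (second program above) the most
    significant nibble is shifted out first, so the first list entry
    is the first pattern driven onto the pins."""
    word = 0
    for n in nibbles:
        word = (word << 4) | (n & 0xF)
    return word

# Wave-drive sequence 8,4,2,1 repeated twice -> the constant used above.
print(pack_pattern([8, 4, 2, 1, 8, 4, 2, 1]))        # 2216789025

# Half-step sequence (assuming coil 1 maps to bit 0, as in the Stepper class above).
print(hex(pack_pattern([1, 3, 2, 6, 4, 12, 8, 9])))
```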
<table class="ee-notebook-buttons" align="left"> <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Image/image_smoothing.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Image/image_smoothing.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Image/image_smoothing.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Image/image_smoothing.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> ## Install Earth Engine API Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`. The following script checks if the geehydro package has been installed. If not, it will install geehydro, which automatically install its dependencies, including earthengine-api and folium. ``` import subprocess try: import geehydro except ImportError: print('geehydro package not installed. Installing ...') subprocess.check_call(["python", '-m', 'pip', 'install', 'geehydro']) ``` Import libraries ``` import ee import folium import geehydro ``` Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. ``` try: ee.Initialize() except Exception as e: ee.Authenticate() ee.Initialize() ``` ## Create an interactive map This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function. The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`. ``` Map = folium.Map(location=[40, -100], zoom_start=4) Map.setOptions('HYBRID') ``` ## Add Earth Engine Python script ``` image = ee.Image('srtm90_v4') smoothed = image.reduceNeighborhood(**{ 'reducer': ee.Reducer.mean(), 'kernel': ee.Kernel.square(3), }) # vis_params = {'min': 0, 'max': 3000} # Map.addLayer(image, vis_params, 'SRTM original') # Map.addLayer(smooth, vis_params, 'SRTM smoothed') Map.setCenter(-112.40, 42.53, 12) Map.addLayer(ee.Terrain.hillshade(image), {}, 'Original hillshade') Map.addLayer(ee.Terrain.hillshade(smoothed), {}, 'Smoothed hillshade') ``` ## Display Earth Engine data layers ``` Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True) Map ```
``` import numpy as np arr = np.arange(0,11) arr # Simplest way to pick an element or some of the elements from an array is similar to indexing in a python list. arr[8] # Gives value at the index 8 # Slice Notations [start:stop] arr[1:5] # 1 inclusive and 5 exclusive # Another Example of Slicing arr[0:5] # To have everything from beginning to the index 6 we use the following syntax on a numpy array : print(arr[:6]) # No need to define the starting point and this basically means arr[0:6] # To have everything from a 5th index to the last we use the following syntax on a numpy array : print(arr[5:]) ``` # Broadcasting the Value **Numpy arrays differ from normal python list due to their ability to broadcast.** ``` arr[0:5] = 100 # Broacasts the value 100 to first 5 digits. arr # Reset the array arr = np.arange(0,11) arr slice_of_arr = arr[0:6] slice_of_arr # To grab everything in the slice slice_of_arr[:] # Broadcasting after grabbing everything in the array slice_of_arr[:] = 99 slice_of_arr arr # Notice above how not only slice_of_arr got changed due to the broadcast but the array arr was also changed. # Slice and the original array both got changed in terms of values. # Data is not copied but rather just copied or pointed from original array. # Reason behind such behaviour is that to prevent memory issues while dealing with large arrays. # It basically means numpy prefers not setting copies of arrays and would rather point slices to their original parent arrays. # Use copy() method which is array_name.copy() arr_copy = arr.copy() arr_copy arr_copy[0:5] = 23 arr arr_copy #Since we have copied now we can see that arr and arr_copy would be different even after broadcasting. # Original array remains unaffected despite changes on the copied array. # Main idea here is that if you grab the actual slice of the array and set it as variable without calling the method copy # on the array then you are just seeing the link to original array and changes on slice would reflect on original/parent array. ``` # 2D Array/Matrix ``` arr_2d = np.array([[5,10,15],[20,25,30],[35,40,45]]) arr_2d # REMEMBER If having confusion regarding dimensions of the matrix just call shape. arr_2d.shape # 3 rows, 3 columns # Two general formats for grabbing elements from a 2D array or matrix format : # (i) Double Bracket Format (ii) Single Bracket Format with comma (Recommended) # (i) Double Bracket Format arr_2d[0][:] # Gives all the elements inside the 0th index of array arr. # arr_2d[0][:] Also works arr_2d[1][2] # Gives the element at index 2 of the 1st index of arr_2d i.e. 30 # (ii) Single Bracket Format with comma (Recommended) : Removes [][] 2 square brackets with a tuple kind (x,y) format # To print 30 we do the following 1st row and 2nd index arr_2d[1,2] # Say we want sub matrices from the matrix arr_2d arr_2d[:3,1:] # Everything upto the third row, and anything from column 1 onwards. arr_2d[1:,:] ``` # Conditional Selection ``` arr = np.arange(1,11) arr # Taking the array arr and comapring it using comparison operators to get a full boolean array out of this. bool_arr = arr > 5 ''' 1. Getting the array and using a comparison operator on it will actually return a boolean array. 2. An array with boolean values in response to our condition. 3. Now we can use the boolean array to actually index or conditionally select elements from the original array where boolean array is true. ''' bool_arr arr[bool_arr] # Gives us only the results which are only true. 
# Doing what's described above in one line: arr[arr<3] # arr[comparison condition] -- get used to this notation; we use it a lot, especially in Pandas! ``` # Exercise 1. Create a new 2D array with np.arange(50).reshape(5,10). 2. Grab any 2 sub-matrices from the 5x10 chunk. ``` arr_2d = np.arange(50).reshape(5,10) arr_2d # Selecting the sub-matrix spanning 11 through 35 arr_2d[1:4,1:6] # Keep in mind the end value is exclusive in the start:end format of indexing. # Selecting everything from column 5 onward (values 5-9, 15-19, ..., 45-49) arr_2d[0:,5:] ```
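Conditional selection also composes: multiple comparisons can be combined element-wise with `&` (and) and `|` (or). The parentheses are required because of operator precedence.

```
arr = np.arange(1, 11)

# Elements greater than 3 AND even
arr[(arr > 3) & (arr % 2 == 0)]      # array([ 4,  6,  8, 10])

# Elements less than 3 OR greater than 8
arr[(arr < 3) | (arr > 8)]           # array([ 1,  2,  9, 10])
```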
# NumPy, Pandas and Matplotlib with ICESat UW Geospatial Data Analysis CEE498/CEWA599 David Shean ## Objectives 1. Solidify basic skills with NumPy, Pandas, and Matplotlib 2. Learn basic data manipulation, exploration, and visualizatioin with a relatively small, clean point dataset (65K points) 3. Learn a bit more about the ICESat mission, the GLAS instrument, and satellite laser altimetry 4. Explore outlier removal, grouping and clustering # ICESat GLAS Background The NASA Ice Cloud and land Elevation Satellite ([ICESat](https://icesat.gsfc.nasa.gov/icesat/)) was a NASA mission carrying the Geosciences Laser Altimeter System (GLAS) instrument: a space laser, pointed down at the Earth (and unsuspecting Earthlings). It measured surface elevations by precisely tracking laser pulses emitted from the spacecraft at a rate of 40 Hz (a new pulse every 0.025 seconds). These pulses traveled through the atmosphere, reflected off the surface, back up through the atmosphere, and into space, where some small fraction of that original energy was received by a telescope on the spacecraft. The instrument electronics precisely recorded the time when these intrepid photons left the instrument and when they returned. The position and orientation of the spacecraft was precisely known, so the two-way traveltime (and assumptions about the speed of light and propagation through the atmosphere) allowed for precise forward determination of the spot on the Earth's surface (or cloud tops, as was often the case) where the reflection occurred. The laser spot size varied during the mission, but was ~70 m in diameter. ICESat collected billions of measurements from 2003 to 2009, and was operating in a "repeat-track" mode that sacrificed spatial coverage for more observations along the same ground tracks over time. One primary science focus involved elevation change over the Earth's ice sheets. It allowed for early measurements of full Antarctic and Greenland ice sheet elevation change, which offered a detailed look at spatial distribution and rates of mass loss, and total ice sheet contributions to sea level rise. There were problems with the lasers during the mission, so it operated in short campaigns lasting only a few months to prolong the full mission lifetime. While the primary measurements focused on the polar regions, many measurements were also collected over lower latitudes, to meet other important science objectives (e.g., estimating biomass in the Earth's forests, observing sea surface height/thickness over time). # Sample GLAS dataset for CONUS A few years ago, I wanted to evaluate ICESat coverage of the Continental United States (CONUS). The primary application was to extract a set of accurate control points to co-register a large set of high-resolution digital elevation modoels (DEMs) derived from satellite stereo imagery. I wrote some Python/shell scripts to download, filter, and process all of the [GLAH14 L2 Global Land Surface Altimetry Data](https://nsidc.org/data/GLAH14/versions/34) granules in parallel ([https://github.com/dshean/icesat_tools](https://github.com/dshean/icesat_tools)). The high-level workflow is here: https://github.com/dshean/icesat_tools/blob/master/glas_proc.py#L24. These tools processed each HDF5 (H5) file and wrote out csv files containing “good” points. These csv files were concatenated to prepare the single input csv (`GLAH14_tllz_conus_lulcfilt_demfilt.csv`) that we will use for this tutorial. 
The csv contains ICESat GLAS shots that passed the following filters: * Within some buffer (~110 km) of mapped glacier polygons from the [Randolph Glacier Inventory (RGI)](https://www.glims.org/RGI/) * Returns from exposed bare ground (landcover class 31) or snow/ice (12) according to a 30-m Land-use/Land-cover dataset (2011 NLCD, https://www.mrlc.gov/data?f%5B0%5D=category%3Aland%20cover) * Elevation values within some threshold (200 m) of elevations sampled from an external reference DEM (void-filled 1/3-arcsec [30-m] SRTM-GL1, https://lpdaac.usgs.gov/products/srtmgl1v003/), used to remove spurious points and returns from clouds. * Various other ICESat-specific quality flags (see comments in `glas_proc.py` for details) The final file contains a relatively small subset (~65K) of the total shots in the original GLAH14 data granules from the full mission timeline (2003-2009). The remaining points should represent returns from the Earth's surface with reasonably high quality, and can be used for subsequent analysis. # Lab Exercises Let's use this dataset to explore some of the NumPy and Pandas functionality, and practice some basic plotting with Matplotlib. I've provided instructions and hints, and you will need to fill in the code to generate the output results and plots. ## Import necessary modules ``` #Use shorter names (np, pd, plt) instead of full (numpy, pandas, matplotlib.pylot) for convenience import numpy as np import pandas as pd import matplotlib.pyplot as plt #Magic function to enable interactive plotting (zoom/pan) in Jupyter notebook #If running locally, this would be `%matplotlib notebook`, but since we're using Juptyerlab, we use widget #%matplotlib widget #Use matplotlib inline to render/embed figures in the notebook for upload to github %matplotlib inline #%matplotlib widget ``` ## Define relative path to the GLAS data csv from week 01 ``` glas_fn = '../01_Shell_Github/data/GLAH14_tllz_conus_lulcfilt_demfilt.csv' ``` ## Do a quick check of file contents * Use iPython functionality to run the `head` shell command on the your filename variable # NumPy Exercises ## Load the file * NumPy has some convenience functions for loading text files: `loadtxt` and `genfromtxt` * Use `loadtxt` here (simpler), but make sure you properly set the delimiter and handle the first row (see the `skiprows` option) * Use iPython `?` to look up reference on arguments for `np.loadtxt` * Store the NumPy array as variable called `glas_np` ## Do a quick check to make sure your array looks good * Don't use `print(glas_np)` here, just run cell containing `glas_np` * Try both - note that the latter returns the object type, in this case `array` ## How many rows and columns are in your array? ## What is the datatype of your array? 
Note that a NumPy array typically has a single datatype, while a Pandas DataFrame can contain multiple data types (e.g., `string`, `float64`) ## Examine the first 3 rows * Use slicing here ## Examine the column with glas_z values * You will need to figure out which column number corresponds to these values (can do this manually from header), then slice the array to return all rows, but only that column ## Compute the mean and standard deviation of the glas_z values ## Use print formatting to create a formatted string with these values * Should be `'GLAS z: mean +/- std meters'` using your `mean` and `std` values, both formatted with 2 decimal places (cm-precision) * For example: 'GLAS z: 1234.56 +/- 42.42 meters' ## Create a Matplotlib scatter plot of the `glas_z` values * Careful about correclty defining your x and y with values for latitude and longitude - easy to mix these up * Use point color to represent the elevation * You should see points that roughly outline the western United States * Label the x axis, y axis, and add a descriptive title ## Use conditionals and fancy indexing to extract points from 2005 * Design a "filter" to isolate the points from 2005 * Can use boolean indexing * Can then extract values from original array using the boolean index * Store these points in a new NumPy array ### How many points were acquired in 2005? # Pandas Exercises A significant portion of the Python data science ecosystem is based on Pandas and/or Pandas data models. >pandas is a Python package providing fast, flexible, and expressive data structures designed to make working with "relational" or "labeled" data both easy and intuitive. It aims to be the fundamental high-level building block for doing practical, real world data analysis in Python. Additionally, it has the broader goal of becoming the most powerful and flexible open source data analysis / manipulation tool available in any language. It is already well on its way towards this goal. https://github.com/pandas-dev/pandas#main-features If you are working with tabular data, especially time series data, please use pandas. * A better way to deal with tabular data, built on top of NumPy arrays * With NumPy, we had to remember which column number (e.g., 3, 4) represented each variable (lat, lon, glas_z, etc) * Pandas allows you to store data with different types, and then reference using more meaningful labels * NumPy: `glas_np[:,4]` * Pandas: `glas_df['glas_z']` * A good "10-minute" reference with examples: https://pandas.pydata.org/pandas-docs/stable/getting_started/10min.html ## Load the csv file with Pandas * Note that pandas has excellent readers for most common file formats: https://pandas.pydata.org/pandas-docs/stable/reference/io.html ## That was easy. Let's inspect the `DataFrame` ## Check data types * Can use the DataFrame `info` method ## Get the column labels * Can use the DataFrame `columns` attribute If you are new to Python and object-oriented programming, take a moment to consider the difference between the methods and attributes of the DataFrame, and how both are accessed. https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html If this is confusing, ask your neighbor or instructor. ## Preview records using DataFrame `head` and `tail` methods ## Compute the mean and standard deviation for all values in each column * Don't overthink this, should be simple (no loops!) ## Print quick stats for entire DataFrame with the `describe` method Useful, huh? 
Note that `median` is the `50%` statistic ## Use the Pandas plotting functionality to create a 2D scatterplot of `glas_z` values * https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.scatter.html * Note that labels and colorbar are automatically plotted! * Adjust the size of the points using the `s=1` keyword * Experiment with different color ramps: * https://matplotlib.org/examples/color/colormaps_reference.html (I prefer `inferno`) #### Color ramps Information on how to choose a good colormap for your data: https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html Another great resource (Thanks @fperez!): https://matplotlib.org/cmocean/ **TL;DR** Don't use `jet`, use a perceptually uniform colormap for linear variables like elevation. Use a diverging color ramp for values where sign is important. ## Experiment by changing the variable represented with the color ramp * Try `decyear` or other columns to quickly visualize spatial distribution of these values. ## Extra Credit: Create a 3D scatterplot See samples here: https://matplotlib.org/mpl_toolkits/mplot3d/tutorial.html Explore with the interactive tools (click and drag to change perspective). Some lag here considering number of points to be rendered, and maybe useful for visualizing small 3D datasets in the future. There are other 3D plotting packages that are built for performance and efficiency (e.g., `ipyvolume`: https://github.com/maartenbreddels/ipyvolume) ## Create a histogram that shows the number of points vs time (`decyear`) * Should be simple with built-in method for your `DataFrame` * Make sure that you use enough bins to avoid aliasing. This could require some trial and error (try 10, 100, 1000, and see if you can find a good compromise) * Can also consider some of the options (e.g., 'auto') here: https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram_bin_edges.html#numpy.histogram_bin_edges * You should be able to resolve the distinct campaigns during the mission (each ~1-2 months long). There is an extra credit problem at the end to group by years and play with clustering for the campaigns. ## Create a histogram of all `glas_z` elevation values * What do you note about the distribution? * Any negative values? ## Wait a minute...negative elevations!? Who calibrated this thing? C'mon NASA. ## A note on vertical datums Note that some elevations are less than 0 m. How can this be? The `glas_z` values are height above (or below) the WGS84 ellipsoid. This is not the same vertical datum as mean sea level (roughly approximated by a geoid model). A good resource explaining the details: https://vdatum.noaa.gov/docs/datums.html ## Let's check the spatial distribution of points below 0 (height above WGS84 ellipsoid) * How many shots have a negative glas_z value? * Create a scatterplot only using points with negative values * Adjust the color ramp bounds to bring out more detail for these points * hint: see the `vmin` and `vmax` arguments for the `plot` function * What do you notice about these points? (may be tough without more context, like coastlines and state boundaries or a tiled basemap - we'll learn how to incorporate these soon) ## Geoid offset Height difference between the WGS84 ellipsoid (simple shape model of the Earth) and a geoid, that approximates a geopotential (gravitational) surface, approximately mean sea level. 
![EGM96_geoid_offset_grid](https://raw.githubusercontent.com/UW-GDA/gda_course_2020/205a411cab3492450c9c0265889b54f5b9d4b699/resources/sample_img/egm96_offset.png) Note values for the Western U.S. ### Interpretation A lot of the points with elevation < 0 m in your scatterplot are near coastal sites, roughly near mean sea level. We see that the geoid offset (difference between WGS84 ellipsoid and EGM96 geoid in this case) for CONUS is roughly -20 m. So the ICESat GLAS point elevations near the coast will have values of around -20 m relative to the ellipsoid, even though they are around 0 m relative to the geoid (approximately mean sea level). Another cluster of points with negative elevations is over Death Valley, CA, which is actually below sea level: https://en.wikipedia.org/wiki/Death_Valley. If this is confusing, we will revisit when we explore raster DEMs later in the quarter. We also get into all of this in the Spring Advanced Surveying course (ask me for details). ## Compute the elevation difference between ICESat `glas_z` and SRTM `dem_z` values Earlier, I mentioned that I had sampled the SRTM DEM for each GLAS shot. Let's compute the difference and store in a new column in our DataFrame called `glas_srtm_dh` Remember the order of this calculation (if the difference values are negative, which dataset is higher elevation?) ## Do a quick `head` to verify that the values in your new column look reasonable ## Compute the time difference between ICESat point timestamp and the SRTM timestamp * Store in a new column named `glas_srtm_dt` * The SRTM data were collected between February 11-22, 2000 * Can assume a constant decimal year value of 2000.112 for now * Check values with `head` ## Compute *apparent* annualized elevation change rates (meters per year) from these new columns * This will be rate of change between the SRTM timestamp (2000) and each GLAS point timestamp (2003-2009) * Check values with `head` ## Create a scatterplot of the difference values * Use a `RdBu` (Red to Blue) color ramp * Set the color ramp limits using `vmin` and `vmax` keyword arguments to be symmetrical about 0 * Generate two plots with different color ramp range to bring out detail * Do you see outliers (values far outside the expected distribution)? * Do you see any coherent spatial patterns in the difference values? ## Create a histogram of the difference values * Increase the number of bins, and limit the range to bring out detail of the distribution ## Compute the mean, median and standard deviation of the differences * Why might we have a non-zero mean/median difference? ## Create a scatterplot of elevation difference `glas_srtm_dh` values vs elevation values * `glas_srtm_dh` should be on the y-axis * `glas_z` values on the x-axis ## Extra Credit: Remove outliers The initial filter in `glas_proc.py` removed GLAS points with absolute elevation difference >200 m compared to the SRTM elevations. We expect most real elevation change signals to be less than this for the given time period. But clearly some outliers remain. Design and apply a filter that removes outliers. One option is to define outliers as values outside some absolute threshold. Can set this threshold as some multiple of the standard deviation (e.g., `3*std`). Can also use quantile or percentile values for this. Create new plot(s) to visualize the distribution of outliers and inliers. I've included my figure as a reference, but please don't worry about reproducing! 
Focus on the filtering and create some quick plots to verify that things worked. ## Active remote sensing sanity check Even after removing outliers, there are still some big differences between the SRTM and GLAS elevation values. * Do you see systematic differences between the glas_z and dem_z values? * Any clues from the scatterplot? (e.g., do some tracks (north-south lines of points) display systematic bias?) * Brainstorm some ideas about what might be going on here. Think about the nature of each sensor: * ICESat was a Near-IR laser (1064 nm wavelength) with a big ground spot size (~70 m in diameter) * Timestamps span different seasons between 2003-2009 * SRTM was a C-band radar (5.3 GHz, 5.6 cm wavelength) with approximately 30 m ground sample distance (pixel size) * Timestamp was February 2000 * Data gaps (e.g., radar shadows, steep slopes) were filled with ASTER GDEM2 composite, which blends DEMs acquired over many years ~2000-2014 * Consider different surfaces and how the laser/radar footprint might be affected: * Flat bedrock surface * Dry sand dunes * Steep montain topography like the Front Range in Colorado * Dense vegetation of the Hoh Rainforest in Olympic National Park ## Let's check to see if differences are due to our land-use/land-cover classes * Determine the unique values in the `lulc` column (hint: see the `value_counts` method) * In the introduction, I said that I initially preserved only two classes for these points (12 - snow/ice, 31 - barren land), so this isn't going to help us over forests: * https://www.mrlc.gov/data/legends/national-land-cover-database-2011-nlcd2011-legend ## Use Pandas `groupby` to compute stats for the LULC classes * This is one of the most powerful features in Pandas, efficient grouping and analysis based on some values * Compute mean, median and std of the difference values (glas_z - dem_z) for each LULC class * Do you see a difference between values over glaciers vs bare rock? ## Extra credit: `groupby` year * See if you can use Pandas `groupby` to count the number of shots for each year * Multiple ways to accomplish this * One approach might be to create a new column with integer year, then groupby that column * Can modify the `decyear` values (see `floor`), or parse the Python time ordinals * Create a bar plot showing number of shots in each year ## Extra Credit: Cluster by campaign * See if you can create an algorithm to cluster the points by campaign * Note, spatial coordinates should not be necessary here (remember your histogram earlier that showed the number of points vs time) * Can do something involving differences between sorted point timestamps * Can also go back and count the number of campaigns in your earlier histogram of `decyear` values, assuming that you used enough bins to discretize! * K-Means clustering is a nice option: https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html * Compute the number of shots and length (number of days) for each campaign * Compare your answer with table here: https://nsidc.org/data/icesat/laser_op_periods.html (remember that we are using a subset of points over CONUS, so the number of days might not match perfectly) ## Extra Credit: Annual scatterplots * Create a figure with multiple subplots showing scatterplots of points for each year ## Extra Credit: Campaign scatterplots * Create a figure with multiple subplots showing scatterplots of points for each campaign
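For the clustering extra credit, a minimal sketch using scikit-learn's KMeans on the acquisition times is below. It assumes the DataFrame is named `glas_df` with a `decyear` column, and that `n_campaigns` was read off your earlier `decyear` histogram (the value 16 here is a placeholder).

```
from sklearn.cluster import KMeans

n_campaigns = 16  # placeholder: set this to the number of clumps in your decyear histogram

km = KMeans(n_clusters=n_campaigns, random_state=42)
glas_df['campaign'] = km.fit_predict(glas_df[['decyear']])

# Shots per campaign and approximate campaign length in days (365.25 days per decimal year)
campaign_stats = glas_df.groupby('campaign')['decyear'].agg(['count', 'min', 'max'])
campaign_stats['length_days'] = (campaign_stats['max'] - campaign_stats['min']) * 365.25
print(campaign_stats.sort_values('min'))
```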
``` import os import couchdb from lib.genderComputer.genderComputer import GenderComputer server = couchdb.Server(url='http://127.0.0.1:15984/') db = server['tweets'] gc = GenderComputer(os.path.abspath('./data/nameLists')) date_list = [] for row in db.view('_design/analytics/_view/conversation-date-breakdown', reduce=True, group=True): date_list.append(row.key) print(date_list) from collections import Counter view_data = [] for row in db.view('_design/analytics/_view/tweets-victoria',startkey="2017/3/6",endkey="2017/3/9"): view_data.append(row.value) len(view_data) try: hashtags = server.create["twitter-hashtags"] except: hashtags = server["twitter-hashtags"] hashtag_count = Counter() for row in view_data: hashtag_count.update(row["hashtags"]) for tag in hashtag_count.most_common(): doc = hashtags.get(tag[0]) # tag[0] -> hashtag, tag[1] -> frequency if doc is None: data = {} data["_id"] = tag[0].replace('\u','') # use word as an id data["hashtag"] = tag[0].replace('\u','') data["count"] = tag[1] else: data = doc data["count"] = data["count"] + tag[1] hashtags.save(data) texts = [] users = [] for row in view_data: text = {} text["text"] = row["text"] text["sentiment"] = row["sentiment"] texts.append(text) user = row["user"] try: gender = gc.resolveGender(user["name"], None) user["gender"] = gender except: continue users.append(user) print("text",len(texts)," user", len(users)) import re emoticons_str = r""" (?: [:=;] # Eyes [oO\-]? # Nose (optional) [D\)\]\(\]/\\OpP] # Mouth )""" regex_str = [ emoticons_str, r'<[^>]+>', # HTML tags r'(?:@[\w_]+)', # @-mentions r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)", # hash-tags r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&amp;+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+', # URLs r'(?:(?:\d+,?)+(?:\.?\d+)?)', # numbers r"(?:[a-z][a-z'\-_]+[a-z])", # words with - and ' r'(?:[\w_]+)', # other words r'(?:\S)' # anything else ] tokens_re = re.compile(r'('+'|'.join(regex_str)+')', re.VERBOSE | re.IGNORECASE) emoticon_re = re.compile(r'^'+emoticons_str+'$', re.VERBOSE | re.IGNORECASE) def tokenize(s): return tokens_re.findall(s) def preprocess(s, lowercase=False): tokens = tokenize(s) if lowercase: tokens = [token if emoticon_re.search(token) else token.lower() for token in tokens] return tokens ## Save Terms Frequency import HTMLParser from collections import Counter from nltk.corpus import stopwords import string punctuation = list(string.punctuation) stop = stopwords.words('english') + punctuation + ['rt', 'via'] count_all = Counter() html_parser = HTMLParser.HTMLParser() emoji_pattern = re.compile( u"(\ud83d[\ude00-\ude4f])|" # emoticons u"(\ud83c[\udf00-\uffff])|" # symbols & pictographs (1 of 2) u"(\ud83d[\u0000-\uddff])|" # symbols & pictographs (2 of 2) u"(\ud83d[\ude80-\udeff])|" # transport & map symbols u"(\ud83c[\udde0-\uddff])" # flags (iOS) "+", flags=re.UNICODE) for text in texts: cleanText = re.sub(r"http\S+", "", text['text']) cleanText = html_parser.unescape(cleanText) cleanText = emoji_pattern.sub(r'', cleanText) terms_stop = [term for term in preprocess(cleanText) if term not in stop] count_all.update(terms_stop) try: words = server.create["twitter-words"] except: words = server["twitter-words"] for num in count_all.most_common(): doc = words.get(num[0]) # num[0] -> word, num[1] -> frequency try: if doc is None: data = {} word_text = num[0].decode("utf8").encode('ascii','ignore') # make sure we don't save unsafe character data["_id"] = word_text # use word as an id data["word"] = word_text data["count"] = num[1] else: data = doc data["count"] = data["count"] + num[1] 
words.save(data) except: continue #save user data # try create user db try: user = server.create["twitter-users"] except: user = server["twitter-users"] for row in users: id = row["id"] doc = user.get(str(id)) if doc is None: row["_id"] = str(row["id"]) user.save(row) "☕".decode("utf8").encode('ascii','ignore') == "" import datetime today = datetime.date.today() today = today.strftime('%Y/%-m/%-d') print(today) ```
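The hashtag, word, and user cells above all repeat the same get-or-create-then-update pattern against CouchDB. A small helper keeps that logic in one place (a hedged sketch; it assumes the `couchdb.Server.create(name)` call, and note that `server.create[...]` in the cells above indexes the method instead of calling it):

```
def get_or_create_db(server, name):
    # couchdb.Server.create() is a method call; fall back to the existing database
    try:
        return server.create(name)
    except Exception:
        return server[name]

def increment_count(db, key, amount, extra=None):
    # Get-or-create a document keyed by `key` and add `amount` to its count
    doc = db.get(key)
    if doc is None:
        doc = {"_id": key, "count": 0}
        if extra:
            doc.update(extra)
    doc["count"] = doc.get("count", 0) + amount
    db.save(doc)

# Example: the hashtag loop above could then be written as
# hashtags = get_or_create_db(server, "twitter-hashtags")
# for tag, freq in hashtag_count.most_common():
#     increment_count(hashtags, tag, freq, extra={"hashtag": tag})
```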
### Natural Language Processing, a look at distinguishing subreddit categories by analyzing the text of the comments and posts **Matt Paterson, hello@hiremattpaterson.com** General Assembly Data Science Immersive, July 2020 ### Abstract **HireMattPaterson.com has been (fictionally) contracted by Virgin Galactic’s marketing team to build a Natural Language Processing Model that will efficiently predict if reddit posts are being made for the SpaceX subreddit or the Boeing subreddit as a proof of concept to segmenting the targeted markets.** We’ve created a model that predicts the silo of the post with nearly 80% accuracy (with a top score of 79.9%). To get there we tried over 2,000 different iterations on a total of 5 different classification modeling algorithms including two versions of Multinomial Naïve Bayes, Random Cut Forest, Extra Trees, and a simple Logistic Regression Classifier. We’d like to use Support Vector Machines as well as Gradient Boosting and a K-Nearest Neighbors model in our follow-up to this presentation. If you like our proof of concept, the next iteration of our model will take in to account the trend or frequency in the comments of each user; what other subreddits these users can be found to post to (are they commenting on the Rolex and Gulfstream and Maserati or are they part of the Venture Capital and AI crowd?); and if their comments appear to be professional in nature (are they looking to someday work in aerospace or maybe they already do). These trends will help the marketing team tune their tone, choose words that are trending, and speak directly to each cohort in a narrow-cast fashion thus allowing VG to spend less money on ads and on people over time. This notebook shows how we got there. ### Problem Statement: Virgin Galactic wants to charge customers USD 250K per voyage to bring customers into outer space on a pleasure cruise in null G The potential customers range from more traditional HNWI who have more conservative values, to the Nouveau Riche, and various levels of tech millionaires in between Large teams of many Marketing Analysts and Marketing Managers are expensive If you can keep your current headcount or only add a few you are better off, since as headcount grows, overall ROI tends to shrink (VG HC ~ 200 ppl) ### Solution: Create a machine learning model to identify what type of interests each user has based on their social media and reddit posts Narrowcast to each smaller cohort with the language, tone, and vocabulary that will push each to purchase the quarter-million dollar flight ## Import libraries ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import nltk import lebowski as dude from sklearn.linear_model import LogisticRegression from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score from sklearn.pipeline import Pipeline from sklearn.naive_bayes import MultinomialNB from sklearn.metrics import confusion_matrix, plot_confusion_matrix from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier from sklearn.ensemble import GradientBoostingClassifier, AdaBoostClassifier, VotingClassifier from sklearn.preprocessing import StandardScaler from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier ``` ## Read in the data. 
In the data_file_creation.ipynb found in this directory, we have already gone to the 'https://api.pushshift.io/reddit/search/' api and pulled subreddit posts and comments from SpaceX, Boeing, BlueOrigin, and VirginGalactic; four specific companies venturing into the outer space exploration business with distinct differences therein. It is the theory of this research team that each subreddit will also have a distinct group of main users, or possible customers that are engaging on each platform. While there will be overlap in the usership, there will also be a clear lexicon that each subreddit thread has. In this particular study, we will look specifically at the differences between SpaceX and Boeing, and will create a classification model that predicts whether a post is indeed in the SpaceX subreddit or not in the SpaceX subreddit. Finally we will test the model against a testing set that is made up of posts from all four companies and measure its ability to predict which posts are SpaceX and which are not. ``` spacex = pd.read_csv('./data/spacex.csv') boeing = pd.read_csv('./data/boeing.csv') spacex.head() ``` We have already done a lot of cleaning up, but as we see there are still many NaN values and other meaningless values in our data. We'll create a function to remove these values using mapping in our dataframe. Before we get there, let's convert our target column into a binary selector. ``` spacex['subreddit'] = spacex['subreddit'].map({'spacex': 1, 'boeing': 0}) boeing['subreddit'] = boeing['subreddit'].map({'spacex': 1, 'boeing': 0}) ``` And drop the null values right off too. ``` print(f"spacex df has {spacex.isna().sum()} null values not including extraneous words") print(f"boeing df has {boeing.isna().sum()} null values not including extraneous words") ``` we can remove these 61 rows right off ``` spacex = spacex.dropna() boeing = boeing.dropna() spacex.shape boeing.shape ``` ## Merge into one dataframe ``` space_wars = pd.concat([spacex, boeing]) space_wars.shape ``` ## Use TF to break up the dataframes into numbers and then drop the unneeded words ``` tvec = TfidfVectorizer(stop_words = 'english') ``` We will only put the 'body' column in to the count vectorizer ``` X_list = space_wars.body nums_df = pd.DataFrame(tvec.fit_transform(X_list).toarray(), columns=tvec.get_feature_names()) nums_df.head() ``` And with credit to Noelle Brown, let's graph the resulting top words: ``` # get count of top-occurring words top_words_tf = {} for i in nums_df.columns: top_words_tf[i] = nums_df[i].sum() # top_words to dataframe sorted by highest occurance most_freq_tf = pd.DataFrame(sorted(top_words_tf.items(), key = lambda x: x[1], reverse = True)) plt.figure(figsize = (10, 5)) # visualize top 10 words plt.bar(most_freq_tf[0][:10], most_freq_tf[1][:10]); ``` We can see that if we remove 'replace_me', 'removed', and 'deleted', then we'll be dealing with a much more useful dataset. For the words dataframe, we can just add these words to our stop_words library. For the numeric dataframe we'll drop them here, as well as a few more. ``` dropwords = ['replace_me', 'removed', 'deleted', 'https', 'com', 'don', 'www'] nums_df = nums_df.drop(columns=dropwords) ``` And we can re-run the graph above for a better look. 
``` # get count of top-occurring words top_words_tf = {} for i in nums_df.columns: top_words_tf[i] = nums_df[i].sum() # top_words to dataframe sorted by highest occurance most_freq_tf = pd.DataFrame(sorted(top_words_tf.items(), key = lambda x: x[1], reverse = True)) plt.figure(figsize = (18, 6)) dude.graph_words('black') # visualize top 10 words plt.bar(most_freq_tf[0][:15], most_freq_tf[1][:15]); ``` If I had more time I'd like to graph the words used most in each company. I can go ahead and try to display which company is more verbose, wordy that is, and which one uses longer words (Credit to Hovanes Gasparian). ``` nums_df = pd.concat([space_wars['subreddit'], nums_df]) space_wars['word_count'] = space_wars['body'].apply(dude.word_count) space_wars['post_length'] = space_wars['body'].apply(dude.count_chars) space_wars[['word_count', 'post_length']].describe().T space_wars.groupby(['word_count']).size().sort_values(ascending=False)#.head() space_wars[space_wars['word_count'] > 1000] #space_wars.groupby(['subreddit', 'word_count']).size().sort_values(ascending=False).head() space_wars.subreddit.value_counts() plt.figure(figsize=(18,6)) dude.graph_words('black') plt.hist([space_wars[space_wars['subreddit']==0]['word_count'], space_wars[space_wars['subreddit']==1]['word_count']], bins=3, color=['blue', 'red'], ec='k') plt.title('Word Count by Company', fontsize=30) plt.legend(['Boeing', 'SpaceX']); ``` ## Trouble in parsing-dise It appears that I'm having some issues with manipulating this portion of the data. I will clean this up before final pull request. ## Create test_train_split with word data #### Find the baseline: ``` baseline = space_wars.subreddit.value_counts(normalize=True)[1] all_scores = {} all_scores['baseline'] = baseline all_scores['baseline'] X_words = space_wars['body'] y_words = space_wars['subreddit'] X_train_w, X_test_w, y_train_w, y_test_w = train_test_split(X_words, y_words, random_state=42, test_size=.1, stratify=y_words) ``` ## Now it's time to train some models! ``` # Modify our stopwords list from the nltk.'english' stopwords = nltk.corpus.stopwords.words('english') # Above we created a list called dropwords for i in dropwords: stopwords.append(i) param_cv = { 'stop_words' : stopwords, 'ngram_range' : (1, 2), 'analyzer' : 'word', 'max_df' : 0.8, 'min_df' : 0.02, } cntv = CountVectorizer(param_cv) # Print y_test for a sanity check y_test_w # credit Noelle from lecture train_data_features = cntv.fit_transform(X_train_w, y_train_w) test_data_features = cntv.transform(X_test_w) ``` ## Logistic Regression ``` lr = LogisticRegression( max_iter = 10_000) lr.fit(train_data_features, y_train_w) lr.score(train_data_features, y_train_w) all_scores['Logistic Regression'] = lr.score(test_data_features, y_test_w) all_scores['Logistic Regression'] ``` ***Using a simple Logistic regression with very little tweaking, a set of stopwords, we created a model that while slightly overfit, is more than 22 points more accurate than the baseline.*** ## What does the confusion matrix look like? Is 80% accuracy even good? Perhaps I can get some help making a confusion matrix with this data? ## Multinomial Naive Bayes using CountVectorizer In this section we will create a Pipeline that starts with the CountVectorizer and ends with the Multinomial Naive Bayes Algorithm. We'll run through 270 possible configurations of this model, and run it in parallel on 3 of the 4 cores on my machine. 
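For clarity, the 270 figure counts grid-search *fits* rather than distinct configurations: the parameter grid in the next cell spans 3 × 1 × 3 × 3 × 2 = 54 combinations, and with 5-fold cross-validation each one is fit 5 times. A quick sanity check (re-stating that grid, with `None` standing in for the single stop-word list):

```
from sklearn.model_selection import ParameterGrid

grid = {
    'count_v__max_features': [2000, 5000, 9000],
    'count_v__stop_words': [None],              # stand-in for the single stopwords list
    'count_v__min_df': [2, 3, 10],
    'count_v__max_df': [.9, .8, .7],
    'count_v__ngram_range': [(1, 1), (1, 2)]
}
n_combinations = len(ParameterGrid(grid))
print(n_combinations, n_combinations * 5)       # 54 combinations, 270 cross-validation fits
```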
```
pipe = Pipeline([
    ('count_v', CountVectorizer()),
    ('nb', MultinomialNB())
])

pipe_params = {
    'count_v__max_features': [2000, 5000, 9000],
    'count_v__stop_words': [stopwords],
    'count_v__min_df': [2, 3, 10],
    'count_v__max_df': [.9, .8, .7],
    'count_v__ngram_range': [(1, 1), (1, 2)]
}

gs = GridSearchCV(pipe, pipe_params, cv=5, n_jobs=6)

%%time
gs.fit(X_train_w, y_train_w)

gs.best_params_

all_scores['Naive Bayes'] = gs.best_score_
all_scores['Naive Bayes']

gs.best_index_  # index of the best parameter combination in gs.cv_results_
```

We see that our Naive Bayes model yields an accuracy score just shy of our Logistic Regression model, 79.7%.

**What does the confusion matrix look like?**

```
# Get predictions and true/false pos/neg
preds = gs.predict(X_test_w)
tn, fp, fn, tp = confusion_matrix(y_test_w, preds).ravel()

# View confusion matrix
dude.graph_words('black')
plot_confusion_matrix(gs, X_test_w, y_test_w, cmap='Blues', values_format='d');

sensitivity = tp / (tp + fn)
sensitivity

specificity = tn / (tn + fp)
specificity
```

## Naive Bayes using the TF-IDF Vectorizer

```
pipe_tvec = Pipeline([
    ('tvec', TfidfVectorizer()),
    ('nb', MultinomialNB())
])

pipe_params_tvec = {
    'tvec__max_features': [2000, 9000],
    'tvec__stop_words': [None, stopwords],
    'tvec__ngram_range': [(1, 1), (1, 2)]
}

gs_tvec = GridSearchCV(pipe_tvec, pipe_params_tvec, cv=5)

%%time
gs_tvec.fit(X_train_w, y_train_w)

all_scores['Naive Bayes TF-IDF'] = gs_tvec.best_score_
all_scores['Naive Bayes TF-IDF']

all_scores

# Confusion matrix for tvec
preds = gs_tvec.predict(X_test_w)
tn, fp, fn, tp = confusion_matrix(y_test_w, preds).ravel()

# View confusion matrix
dude.graph_words('black')
plot_confusion_matrix(gs_tvec, X_test_w, y_test_w, cmap='Blues', values_format='d');

specificity = tn / (tn + fp)
specificity

sensitivity = tp / (tp + fn)
sensitivity
```

Here the specificity is a few points higher than for the Naive Bayes model with the CountVectorizer, while the sensitivity and overall accuracy are about the same.

## Random Forest and Extra Trees

```
pipe_rf = Pipeline([
    ('count_v', CountVectorizer()),
    ('rf', RandomForestClassifier()),
])

pipe_ef = Pipeline([
    ('count_v', CountVectorizer()),
    ('ef', ExtraTreesClassifier()),
])

pipe_params = {
    'count_v__max_features': [2000, 5000, 9000],
    'count_v__stop_words': [stopwords],
    'count_v__min_df': [2, 3, 10],
    'count_v__max_df': [.9, .8, .7],
    'count_v__ngram_range': [(1, 1), (1, 2)]
}

%%time
gs_rf = GridSearchCV(pipe_rf, pipe_params, cv=5, n_jobs=6)
gs_rf.fit(X_train_w, y_train_w)
print(gs_rf.best_score_)
gs_rf.best_params_

gs_rf.best_estimator_

all_scores['Random Forest'] = gs_rf.best_score_
all_scores

# Confusion matrix for Random Forest
preds = gs_rf.predict(X_test_w)
tn, fp, fn, tp = confusion_matrix(y_test_w, preds).ravel()

# View confusion matrix
dude.graph_words('black')
plot_confusion_matrix(gs_rf, X_test_w, y_test_w, cmap='Blues', values_format='d');

specificity = tn / (tn + fp)
specificity

sensitivity = tp / (tp + fn)
sensitivity
```

Our original Logistic Regression model is still the winner.

## What does the matchup look like?

```
score_df = pd.DataFrame([all_scores])
score_df.shape
score_df.head()
```

## Create a Count Vectorized dataset

Since the below cells have been troublesome, we'll create a dataset using only the count vectorizer and then use that data in the model as we did above.
``` # Re-establish the subsets using Noelle's starter script again train_data_features = cntv.fit_transform(X_train_w, y_train_w) test_data_features = cntv.transform(X_test_w) pipe_params_tvec = { 'tvec__max_features': [2000, 9000], 'tvec__stop_words' : [None, stopwords], 'tvec__ngram_range': [(1, 1), (1, 2)] } knn_pipe = Pipeline([ ('ss', StandardScaler()), ('knn', KNeighborsClassifier()) ]) tree_pipe = Pipeline([ ('tvec', TfidfVectorizer()), ('tree', DecisionTreeClassifier()) ]) ada_pipe = Pipeline([ ('tvec', TfidfVectorizer()), ('ada', AdaBoostClassifier(base_estimator=DecisionTreeClassifier())), ]) grad_pipe = Pipeline([ ('tvec', TfidfVectorizer()), ('grad_boost', GradientBoostingClassifier()), ]) ``` ### Irreconcilable Error: At this time there are still structural issues that are not allowing this last block of code to complete the final model attempts (user error). ***In the next few days, prior to publication, this notebook will be revamped and this final cell will execute.*** ``` %%time vote = VotingClassifier([ ('ada', AdaBoostClassifier(base_estimator=DecisionTreeClassifier())), ('grad_boost', GradientBoostingClassifier()), ('tree', DecisionTreeClassifier()), ('knn_pipe', knn_pipe) ]) params = { 'ada__n_estimators': [50, 51], # since HPO names are common, use dunder from tuple names 'grad_boost__n_estimators': [10, 11], 'knn_pipe__knn__n_neighbors': [3, 5], 'ada__base_estimator__max_depth': [1, 2], 'tree__max_depth': [1, 2], 'weights':[[.25] * 4, [.3, .3, .3, .1]] } gs = GridSearchCV(vote, param_grid=params, cv=3) gs.fit(train_data_features, y_train_w) print(gs.best_score_) gs.best_params_ ```
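One plausible reason for the failure above (an assumption, not a confirmed diagnosis) is that `train_data_features` is a sparse matrix from the CountVectorizer, and `StandardScaler` refuses sparse input unless centering is disabled. A minimal sketch of a sparse-safe KNN pipeline that could be swapped into the voting classifier:

```
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier

# Sparse-safe scaling: with_mean=False avoids "Cannot center sparse matrices"
knn_pipe = Pipeline([
    ('ss', StandardScaler(with_mean=False)),
    ('knn', KNeighborsClassifier())
])
# Re-running the voting-classifier cell above with this pipeline should get past the
# scaling step; the tree-based estimators accept sparse input as-is.
```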
<a href="https://colab.research.google.com/github/vitutorial/exercises/blob/master/LatentFactorModel/LatentFactorModel.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` %matplotlib inline import os import re import urllib.request import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import matplotlib.pyplot as plt import itertools from torch.utils.data import Dataset, DataLoader from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu") ``` In this notebook you will work with a deep generative language model that maps words from a discrete (bit-vector-valued) latent space. We will use text data (we will work on the character level) in Spanish and pytorch. The first section concerns data manipulation and data loading classes necessary for our implementation. You do not need to modify anything in this part of the code. Let's first download the SIGMORPHON dataset that we will be using for this notebook: these are inflected Spanish words together with some morphosyntactic descriptors. For this notebook we will ignore the morphosyntactic descriptors. ``` url = "https://raw.githubusercontent.com/ryancotterell/sigmorphon2016/master/data/" train_file = "spanish-task1-train" val_file = "spanish-task1-dev" test_file = "spanish-task1-test" print("Downloading data files...") if not os.path.isfile(train_file): urllib.request.urlretrieve(url + train_file, filename=train_file) if not os.path.isfile(val_file): urllib.request.urlretrieve(url + val_file, filename=val_file) if not os.path.isfile(test_file): urllib.request.urlretrieve(url + test_file, filename=test_file) print("Download complete.") ``` # Data In order to work with text data, we need to transform the text into something that our algorithms can work with. The first step of this process is converting words into word ids. We do this by constructing a vocabulary from the data, assigning a new word id to each new word it encounters. ``` UNK_TOKEN = "?" PAD_TOKEN = "_" SOW_TOKEN = ">" EOW_TOKEN = "." def extract_inflected_word(s): """ Extracts the inflected words in the SIGMORPHON dataset. """ return s.split()[-1] class Vocabulary: def __init__(self): self.idx_to_char = {0: UNK_TOKEN, 1: PAD_TOKEN, 2: SOW_TOKEN, 3: EOW_TOKEN} self.char_to_idx = {UNK_TOKEN: 0, PAD_TOKEN: 1, SOW_TOKEN: 2, EOW_TOKEN: 3} self.word_freqs = {} def __getitem__(self, key): return self.char_to_idx[key] if key in self.char_to_idx else self.char_to_idx[UNK_TOKEN] def word(self, idx): return self.idx_to_char[idx] def size(self): return len(self.char_to_idx) @staticmethod def from_data(filenames): """ Creates a vocabulary from a list of data files. It assumes that the data files have been tokenized and pre-processed beforehand. """ vocab = Vocabulary() for filename in filenames: with open(filename) as f: for line in f: # Strip whitespace and the newline symbol. word = extract_inflected_word(line.strip()) # Split the words into characters and assign ids to each # new character it encounters. for char in list(word): if char not in vocab.char_to_idx: idx = len(vocab.char_to_idx) vocab.char_to_idx[char] = idx vocab.idx_to_char[idx] = char return vocab # Construct a vocabulary from the training and validation data. 
print("Constructing vocabulary...") vocab = Vocabulary.from_data([train_file, val_file]) print("Constructed a vocabulary of %d types" % vocab.size()) # some examples print('e', vocab['e']) print('é', vocab['é']) print('ș', vocab['ș']) # something UNKNOWN ``` We also need to load the data files into memory. We create a simple class `TextDataset` that stores the data as a list of words: ``` class TextDataset(Dataset): """ A simple class that loads a list of words into memory from a text file, split by newlines. This does not do any memory optimisation, so if your dataset is very large, you might want to use an alternative class. """ def __init__(self, text_file, max_len=30): self.data = [] with open(text_file) as f: for line in f: word = extract_inflected_word(line.strip()) if len(list(word)) <= max_len: self.data.append(word) def __len__(self): return len(self.data) def __getitem__(self, idx): return self.data[idx] # Load the training, validation, and test datasets into memory. train_dataset = TextDataset(train_file) val_dataset = TextDataset(val_file) test_dataset = TextDataset(test_file) # Print some samples from the data: print("Sample from training data: \"%s\"" % train_dataset[np.random.choice(len(train_dataset))]) print("Sample from validation data: \"%s\"" % val_dataset[np.random.choice(len(val_dataset))]) print("Sample from test data: \"%s\"" % test_dataset[np.random.choice(len(test_dataset))]) ``` Now it's time to write a function that converts a word into a list of character ids using the vocabulary we created before. This function is `create_batch` in the code cell below. This function creates a batch from a list of words, and makes sure that each word starts with a start-of-word symbol and ends with an end-of-word symbol. Because not all words are of equal length in a certain batch, words are padded with padding symbols so that they match the length of the largest word in the batch. The function returns an input batch, an output batch, a mask of 1s for words and 0s for padding symbols, and the sequence lengths of each word in the batch. The output batch is shifted by one character, to reflect the predictions that the model is expected to make. For example, for a word \begin{align} \text{e s p e s e m o s} \end{align} the input sequence is \begin{align} \text{SOW e s p e s e m o s} \end{align} and the output sequence is \begin{align} \text{e s p e s e m o s EOW} \end{align} You can see the output is shifted wrt the input, that's because we will be computing a distribution for the next character in context of its prefix, and that's why we need to shift the sequence this way. Lastly, we create an inverse function `batch_to_words` that recovers the list of words from a padded batch of character ids to use during test time. ``` def create_batch(words, vocab, device, word_dropout=0.): """ Converts a list of words to a padded batch of word ids. Returns an input batch, an output batch shifted by one, a sequence mask over the input batch, and a tensor containing the sequence length of each batch element. 
:param words: a list of words, each a list of token ids :param vocab: a Vocabulary object for this dataset :param device: :param word_dropout: rate at which we omit words from the context (input) :returns: a batch of padded inputs, a batch of padded outputs, mask, lengths """ tok = np.array([[SOW_TOKEN] + list(w) + [EOW_TOKEN] for w in words]) seq_lengths = [len(w)-1 for w in tok] max_len = max(seq_lengths) pad_id = vocab[PAD_TOKEN] pad_id_input = [ [vocab[w[t]] if t < seq_lengths[idx] else pad_id for t in range(max_len)] for idx, w in enumerate(tok)] # Replace words of the input with <unk> with p = word_dropout. if word_dropout > 0.: unk_id = vocab[UNK_TOKEN] word_drop = [ [unk_id if (np.random.random() < word_dropout and t < seq_lengths[idx]) else word_ids[t] for t in range(max_len)] for idx, word_ids in enumerate(pad_id_input)] # The output batch is shifted by 1. pad_id_output = [ [vocab[w[t+1]] if t < seq_lengths[idx] else pad_id for t in range(max_len)] for idx, w in enumerate(tok)] # Convert everything to PyTorch tensors. batch_input = torch.tensor(pad_id_input) batch_output = torch.tensor(pad_id_output) seq_mask = (batch_input != vocab[PAD_TOKEN]) seq_length = torch.tensor(seq_lengths) # Move all tensors to the given device. batch_input = batch_input.to(device) batch_output = batch_output.to(device) seq_mask = seq_mask.to(device) seq_length = seq_length.to(device) return batch_input, batch_output, seq_mask, seq_length def batch_to_words(tensors, vocab: Vocabulary): """ Converts a batch of word ids back to words. :param tensors: [B, T] word ids :param vocab: a Vocabulary object for this dataset :returns: an array of strings (each a word). """ words = [] batch_size = tensors.size(0) for idx in range(batch_size): word = [vocab.word(t.item()) for t in tensors[idx,:]] # Filter out the start-of-word and padding tokens. word = list(filter(lambda t: t != PAD_TOKEN and t != SOW_TOKEN, word)) # Remove the end-of-word token and all tokens following it. if EOW_TOKEN in word: word = word[:word.index(EOW_TOKEN)] words.append("".join(word)) return np.array(words) ``` In PyTorch the RNN functions expect inputs to be sorted from long words to shorter ones. Therefore we create a simple wrapper class for the DataLoader class that sorts words from long to short: ``` class SortingTextDataLoader: """ A wrapper for the DataLoader class that sorts a list of words by their lengths in descending order. """ def __init__(self, dataloader): self.dataloader = dataloader self.it = iter(dataloader) def __iter__(self): return self def __next__(self): words = None for s in self.it: words = s break if words is None: self.it = iter(self.dataloader) raise StopIteration words = np.array(words) sort_keys = sorted(range(len(words)), key=lambda idx: len(list(words[idx])), reverse=True) sorted_words = words[sort_keys] return sorted_words ``` # Model ## Deterministic language model In language modelling, we model a word $x = \langle x_1, \ldots, x_n \rangle$ of length $n = |x|$ as a sequence of categorical draws: \begin{align} X_i|x_{<i} & \sim \text{Cat}(f(x_{<i}; \theta)) & i = 1, \ldots, n \\ \end{align} where we use $x_{<i}$ to denote a (possibly empty) prefix string, and thus the model makes no Markov assumption. 
We map from the conditioning context, the prefix $x_{<i}$, to the categorical parameters (a $v$-dimensional probability vector, where $v$ denotes the size of the vocabulary, in this case, the size of the character set) using a fixed neural network architecture whose parameters we collectively denote by $\theta$. This assigns the following likelihood to the word \begin{align} P(x|\theta) &= \prod_{i=1}^n P(x_i|x_{<i}, \theta) \\ &= \prod_{i=1}^n \text{Cat}(x_i|f(x_{<i}; \theta)) \end{align} where the categorical pmf is $\text{Cat}(k|\pi) = \prod_{j=1}^v \pi_j^{[k=j]} = \pi_k$. Suppose we have a dataset $\mathcal D = \{x^{(1)}, \ldots, x^{(N)}\}$ containing $N$ i.i.d. observations. Then we can use the log-likelihood function \begin{align} \mathcal L(\theta|\mathcal D) &= \sum_{k=1}^{N} \log P(x^{(k)}| \theta) \\ &= \sum_{k=1}^{N} \sum_{i=1}^{|x^{(k)}|} \log \text{Cat}(x^{(k)}_i|f(x^{(k)}_{<i}; \theta)) \end{align} to estimate $\theta$ by maximisation: \begin{align} \theta^\star = \arg\max_{\theta \in \Theta} \mathcal L(\theta|\mathcal D) ~ . \end{align} We can use stochastic gradient-ascent to find a local optimum of $\mathcal L(\theta|\mathcal D)$, which only requires a gradient estimate: \begin{align} \nabla_\theta \mathcal L(\theta|\mathcal D) &= \sum_{k=1}^{|\mathcal D|} \nabla_\theta \log P(x^{(k)}|\theta) \\ &= \sum_{k=1}^{|\mathcal D|} \frac{1}{N} N \nabla_\theta \log P(x^{(k)}| \theta) \\ &= \mathbb E_{\mathcal U(1/N)} \left[ N \nabla_\theta \log P(x^{(K)}| \theta) \right] \\ &\overset{\text{MC}}{\approx} \frac{N}{M} \sum_{m=1}^M \nabla_\theta \log P(x^{(k_m)}|\theta) \\ &\text{where }K_m \sim \mathcal U(1/N) \end{align} This is a Monte Carlo (MC) estimate of the gradient computed on $M$ data points selected uniformly at random from $\mathcal D$. For as long as $f$ remains differentiable wrt to its inputs and parameters, we can rely on automatic differentiation to obtain gradient estimates. An example design for $f$ is: \begin{align} \mathbf x_i &= \text{emb}(x_i; \theta_{\text{emb}}) \\ \mathbf h_0 &= \mathbf 0 \\ \mathbf h_i &= \text{rnn}(\mathbf h_{i-1}, \mathbf x_{i-1}; \theta_{\text{rnn}}) \\ f(x_{<i}; \theta) &= \text{softmax}(\text{dense}_v(\mathbf h_{i}; \theta_{\text{out}})) \end{align} where * $\text{emb}$ is a fixed embedding layer with parameters $\theta_{\text{emb}}$; * $\text{rnn}$ is a recurrent architecture with parameters $\theta_{\text{rnn}}$, e.g. an LSTM or GRU, and $\mathbf h_0$ is part of the architecture's parameters; * $\text{dense}_v$ is a dense layer with $v$ outputs (vocabulary size) and parameters $\theta_{\text{out}}$. In what follows we show how to extend this model with a continuous latent word embedding. ## Deep generative language model We want to model a word $x$ as a draw from the marginal of deep generative model $P(z, x|\theta, \alpha) = P(z|\alpha)P(x|z, \theta)$. ### Generative model The generative story is: \begin{align} Z_k & \sim \text{Bernoulli}(\alpha_k) & k=1,\ldots, K \\ X_i | z, x_{<i} &\sim \text{Cat}(f(z, x_{<i}; \theta)) & i=1, \ldots, n \end{align} where $z \in \mathbb R^K$ and we impose a product of independent Bernoulli distributions prior. Other choices of prior can induce interesting properties in latent space, for example, the Bernoullis could be correlated, however, in this notebook, we use independent distributions. 
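As a concrete illustration of the first line of this generative story, here is a minimal sketch (with `K` and `alpha` chosen arbitrarily) of drawing a latent bit vector from the product-of-Bernoullis prior with `torch.distributions.Bernoulli` and scoring it:

```
import torch
from torch.distributions import Bernoulli

K = 16
alpha = torch.full((K,), 0.5)       # prior probability that each bit is on
prior = Bernoulli(probs=alpha)      # product of K independent Bernoullis
z = prior.sample()                  # one K-dimensional bit vector
log_pz = prior.log_prob(z).sum(-1)  # log P(z|alpha) = sum_k log Bernoulli(z_k|alpha_k)
print(z, log_pz)                    # with alpha = 0.5, log_pz equals K * log(0.5)
```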
**About the prior parameter** The parameter of the $k$th Bernoulli distribution is the probability that the $k$th bit in $z$ is set to $1$, and therefore, if we have reasons to believe some bits are more frequent than others (for example, because we expect some bits to capture verb attributes and others to capture noun attributes, and we know nouns are more frequent than verbs) we may be able to have a good guess at $\alpha_k$ for different $k$, otherwise, we may simply say that bits are about as likely to be on or off a priori, thus setting $\alpha_k = 0.5$ for every $k$. In this lab, we will treat the prior parameter ($\alpha$) as *fixed*. **Architecture** It is easy to design $f$ by a simple modification of the deterministic design shown before: \begin{align} \mathbf x_i &= \text{emb}(x_i; \theta_{\text{emb}}) \\ \mathbf h_0 &= \tanh(\text{dense}(z; \theta_{\text{init}})) \\ \mathbf h_i &= \text{rnn}(\mathbf h_{i-1}, \mathbf x_{i-1}; \theta_{\text{rnn}}) \\ f(x_{<i}; \theta) &= \text{softmax}(\text{dense}_v(\mathbf h_{i}; \theta_{\text{out}})) \end{align} where we just initialise the recurrent cell using $z$. Note we could also use $z$ in other places, for example, as additional input to every update of the recurrent cell $\mathbf h_i = \text{rnn}(\mathbf h_{i-1}, [\mathbf x_{i-1}, z])$. This is an architecture choice which like many others can only be judged empirically or on the basis of practical convenience. ### Parameter estimation The marginal likelihood, necessary for parameter estimation, is now no longer tractable: \begin{align} P(x|\theta, \alpha) &= \sum_{z \in \{0,1\}^K} P(z|\alpha)P(x|z, \theta) \\ &= \sum_{z \in \{0,1\}^K} \prod_{k=1}^K \text{Bernoulli}(z_k|\alpha_k)\prod_{i=1}^n \text{Cat}(x_i|f(z,x_{<i}; \theta) ) \end{align} the intractability is clear as there is an exponential number of assignments to $z$, namely, $2^K$. We turn to variational inference and derive a lowerbound $\mathcal E(\theta, \lambda|\mathcal D)$ on the log-likelihood function \begin{align} \mathcal E(\theta, \lambda|\mathcal D) &= \sum_{s=1}^{|\mathcal D|} \mathcal E_s(\theta, \lambda|x^{(s)}) \end{align} which for a single datapoint $x$ is \begin{align} \mathcal E(\theta, \lambda|x) &= \mathbb{E}_{Q(z|x, \lambda)}\left[\log P(x|z, \theta)\right] - \text{KL}\left(Q(z|x, \lambda)||P(z|\alpha)\right)\\ \end{align} where we have introduce an independently parameterised auxiliary distribution $Q(z|x, \lambda)$. The distribution $Q$ which maximises this *evidence lowerbound* (ELBO) is also the distribution that minimises \begin{align} \text{KL}(Q(z|x, \lambda)||P(z|x, \theta, \alpha)) = \mathbb E_{Q(z|x, \lambda)}\left[\log \frac{Q(z|x, \lambda)}{P(z|x, \theta, \alpha)}\right] \end{align} where $P(z|x, \theta, \alpha) = \frac{P(x, z|\theta, \alpha)}{P(x|\theta, \alpha)}$ is our intractable true posterior. For that reason, we think of $Q(z|x, \lambda)$ as an *approximate posterior*. The approximate posterior is an independent model of the latent variable given the data, for that reason we also call it an *inference model*. In this notebook, our inference model will be a product of independent Bernoulli distributions, to make sure that we cover the sample space of our latent variable. We will leave at the end of the notebook as an optional exercise to model correlations (thus achieving *structured* inference, rather than mean field inference). 
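Before turning to the mean-field family, it may help to see concretely why the marginal above cannot simply be enumerated; a small sketch (with `K` chosen tiny on purpose):

```
import itertools
import torch

K = 4
all_z = torch.tensor(list(itertools.product([0.0, 1.0], repeat=K)))  # shape [2**K, K]
print(all_z.shape)  # torch.Size([16, 4]); the number of assignments doubles with every bit

# For a trained model and a tiny K one could compute the exact marginal as
#   log P(x) = logsumexp_z [ log P(z|alpha) + log P(x|z, theta) ],
# which is precisely the sum over 2^K terms that the ELBO lets us avoid for realistic K.
```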
Such mean field (MF) approximation takes $K$ Bernoulli variational factors whose parameters we predict with a neural network: \begin{align} Q(z|x, \lambda) &= \prod_{k=1}^K \text{Bernoulli}(z_k|\beta_k(x; \lambda)) \end{align} Note we compute a *fixed* number, namely, $K$, of Bernoulli parameters. This can be done with a neural network that outputs $K$ values and employs a sigmoid activation for the outputs. For this choice, the KL term in the ELBO is tractable: \begin{align} \text{KL}\left(Q(z|x, \lambda)||P(z|\alpha)\right) &= \sum_{k=1}^K \text{KL}\left(Q(z_k|x, \lambda)||P(z_k|\alpha_k)\right) \\ &= \sum_{k=1}^K \text{KL}\left(\text{Bernoulli}(\beta_k(x;\lambda))|| \text{Bernoulli}(\alpha_k)\right) \\ &= \sum_{k=1}^K \beta_k(x;\lambda) \log \frac{\beta_k(x;\lambda)}{\alpha_k} + (1-\beta_k(x;\lambda)) \log \frac{1-\beta_k(x;\lambda)}{1-\alpha_k} \end{align} Here's an example design for our inference model: \begin{align} \mathbf x_i &= \text{emb}(x_i; \lambda_{\text{emb}}) \\ \mathbf f_i &= \text{rnn}(\mathbf f_{i-1}, \mathbf x_{i}; \lambda_{\text{fwd}}) \\ \mathbf b_i &= \text{rnn}(\mathbf b_{i+1}, \mathbf x_{i}; \lambda_{\text{bwd}}) \\ \mathbf h &= \text{dense}([\mathbf f_{n}, \mathbf b_1]; \lambda_{\text{hid}}) \\ \beta(x; \lambda) &= \text{sigmoid}(\text{dense}_K(\mathbf h; \lambda_{\text{out}})) \end{align} where we use the $\text{sigmoid}$ activation to make sure our probabilities are independently set between $0$ and $1$. Because we have neural networks compute the Bernoulli variational factors for us, we call this *amortised* mean field inference. ### Gradient estimation We have to obtain gradients of the ELBO with respect to $\theta$ (generative model) and $\lambda$ (inference model). Recall we will leave $\alpha$ fixed. For the **generative model** \begin{align} \nabla_\theta \mathcal E(\theta, \lambda|x) &=\nabla_\theta\sum_{z} Q(z|x, \lambda)\log P(x|z,\theta) - \underbrace{\nabla_\theta \sum_{k=1}^K \text{KL}(Q(z_k|x, \lambda) || P(z_k|\alpha_k))}_{\color{blue}{0}} \\ &=\sum_{z} Q(z|x, \lambda)\nabla_\theta\log P(x|z,\theta) \\ &= \mathbb E_{Q(z|x, \lambda)}\left[\nabla_\theta\log P(x|z,\theta) \right] \\ &\overset{\text{MC}}{\approx} \frac{1}{S} \sum_{s=1}^S \nabla_\theta \log P(x|z^{(s)}, \theta) \end{align} where $z^{(s)} \sim Q(z|x,\lambda)$. Note there is no difficulty in obtaining gradient estimates precisely because the samples come from the inference model and therefore do not interfere with backpropagation for updates to $\theta$. For the **inference model** the story is less straightforward, and we have to use the *score function estimator* (a.k.a. 
REINFORCE): \begin{align} \nabla_\lambda \mathcal E(\theta, \lambda|x) &=\nabla_\lambda\sum_{z} Q(z|x, \lambda)\log P(x|z,\theta) - \nabla_\lambda \underbrace{\sum_{k=1}^K \text{KL}(Q(z_k|x, \lambda) || P(z_k|\alpha_k))}_{ \color{blue}{\text{tractable} }} \\ &=\sum_{z} \nabla_\lambda Q(z|x, \lambda)\log P(x|z,\theta) - \sum_{k=1}^K \nabla_\lambda \text{KL}(Q(z_k|x, \lambda) || P(z_k|\alpha_k)) \\ &=\sum_{z} \underbrace{Q(z|x, \lambda) \nabla_\lambda \log Q(z|x, \lambda)}_{\nabla_\lambda Q(z|x, \lambda)} \log P(x|z,\theta) - \sum_{k=1}^K \nabla_\lambda \text{KL}(Q(z_k|x, \lambda) || P(z_k|\alpha_k)) \\ &= \mathbb E_{Q(z|x, \lambda)}\left[ \log P(x|z,\theta) \nabla_\lambda \log Q(z|x, \lambda) \right] - \sum_{k=1}^K \nabla_\lambda \text{KL}(Q(z_k|x, \lambda) || P(z_k|\alpha_k)) \\ &\overset{\text{MC}}{\approx} \left(\frac{1}{S} \sum_{s=1}^S \log P(x|z^{(s)}, \theta) \nabla_\lambda \log Q(z^{(s)}|x, \lambda) \right) - \sum_{k=1}^K \nabla_\lambda \text{KL}(Q(z_k|x, \lambda) || P(z_k|\alpha_k)) \end{align} where $z^{(s)} \sim Q(z|x,\lambda)$. ## Implementation Let's implement the model and the loss (negative ELBO). We work with the notion of a *surrogate loss*, that is, a computation node whose gradients wrt to parameters are equivalent to the gradients we need. For a given sample $z \sim Q(z|x, \lambda)$, the following is a single-sample surrogate loss: \begin{align} \mathcal S(\theta, \lambda|x) = \log P(x|z, \theta) + \color{red}{\text{detach}(\log P(x|z, \theta) )}\log Q(z|x, \lambda) - \sum_{k=1}^K \text{KL}(Q(z_k|x, \lambda) || P(z_k|\alpha_k)) \end{align} Check the documentation of pytorch's `detach` method. Show that it's gradients wrt $\theta$ and $\lambda$ are exactly what we need: \begin{align} \nabla_\theta \mathcal S(\theta, \lambda|x) = \color{red}{?} \end{align} \begin{align} \nabla_\lambda \mathcal S(\theta, \lambda|x) = \color{red}{?} \end{align} Let's now turn to the actual implementation in pytorch of the inference model as well as the generative model. Here and there we will provide helper code for you. ``` def bernoulli_log_probs_from_logits(logits): """ Let p be the Bernoulli parameter and q = 1 - p. This function is a stable computation of p and q from logit = log(p/q). :param logit: log (p/q) :return: log_p, log_q """ return - F.softplus(-logits), - F.softplus(logits) ``` We start with the implementation of a product of Bernoulli distributions where the parameters are *given* at construction time. That is, for some vector $b_1, \ldots, b_K$ we have \begin{equation} Z_k \sim \text{Bernoulli}(b_k) \end{equation} and thus the joint probability of $z_1, \ldots, z_K$ is given by $\prod_{k=1}^K \text{Bernoulli}(z_k|b_k)$. ``` class ProductOfBernoullis: """ This is class models a product of independent Bernoulli distributions. Each product of Bernoulli is defined by a D-dimensional vector of logits for each independent Bernoulli variable. """ def __init__(self, logits): """ :param p: a tensor of D Bernoulli parameters (logits) for each batch element. [B, D] """ pass def mean(self): """For Bernoulli variables this is the probability of each Bernoulli being 1.""" return None def std(self): """For Bernoulli variables this is p*(1-p) where p is the probability of the Bernoulli being 1""" return self.probs * (1.0 - self.probs) def sample(self): """ Returns a sample with the shape of the Bernoulli parameter. # [B, D] """ return None def log_prob(self, x): """ Assess the log probability mass of x. 
:param x: a tensor of Bernoulli samples (same shape as the Bernoulli parameter) [B, D] :returns: tensor of log probabilitie densities [B] """ return None def unstable_kl(self, other: 'Bernoulli'): """ The straightforward implementation of the KL between two Bernoullis. This implementation is unstable, a stable implementation is provided in ProductOfBernoullis.kl(self, q) :returns: a tensor of KL values with the same shape as the parameters of self. """ return None def kl(self, other: 'Bernoulli'): """ A stable implementation of the KL divergence between two Bernoulli variables. :returns: a tensor of KL values with the same shape as the parameters of self. """ return None ``` Then we should implement the inference model $Q(z | x, \lambda)$, that is, a module that uses a neural network to map from a data point $x$ to the parameters of a product of Bernoullis. You might want to consult the documentation of * `torch.nn.Embedding` * `torch.nn.LSTM` * `torch.nn.Linear` * and of our own `ProductOfBernoullis` distribution (see above). ``` class InferenceModel(nn.Module): def __init__(self, vocab_size, embedder, hidden_size, latent_size, pad_idx, bidirectional=False): """ Implement the layers in the inference model. :param vocab_size: size of the vocabulary of the language :param embedder: embedding layer :param hidden_size: size of recurrent cell :param latent_size: size K of the latent variable :param pad_idx: id of the -PAD- token :param bidirectional: whether we condition on x via a bidirectional or unidirectional encoder """ super().__init__() # pytorch modules should always start with this pass # Construct your NN blocks here # and make sure every block is an attribute of self # or they won't get initialised properly # for example, self.my_linear_layer = torch.nn.Linear(...) def forward(self, x, seq_mask, seq_len) -> ProductOfBernoullis: """ Return an inference product of Bernoullis per instance in the mini-batch :param x: words [B, T] as token ids :param seq_mask: indicates valid positions vs padding positions [B, T] :param seq_len: the length of the sequences [B] :return: a collection of B ProductOfBernoullis approximate posterior, each a distribution over K-dimensional bit vectors """ pass # tests for inference model pad_idx = vocab.char_to_idx[PAD_TOKEN] dummy_inference_model = InferenceModel( vocab_size=vocab.size(), embedder=nn.Embedding(vocab.size(), 64, padding_idx=pad_idx), hidden_size=128, latent_size=16, pad_idx=pad_idx, bidirectional=True ).to(device=device) dummy_batch_size = 32 dummy_dataloader = SortingTextDataLoader(DataLoader(train_dataset, batch_size=dummy_batch_size)) dummy_words = next(dummy_dataloader) x_in, _, seq_mask, seq_len = create_batch(dummy_words, vocab, device) q_z_given_x = dummy_inference_model.forward(x_in, seq_mask, seq_len) ``` Then we should implement the generative latent factor model. The decoder is a sequence of correlated Categorical draws that condition on a latent factor assignment. We will be parameterising categorical distributions, so you might want to check the documentation of `torch.distributions.categorical.Categorical`. 
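For reference, a tiny sketch (shapes chosen arbitrarily) of how `torch.distributions.Categorical` scores token ids given a `[B, T, V]` tensor of logits, which is the shape the decoder below is expected to return:

```
import torch
from torch.distributions import Categorical

B, T, V = 2, 5, 30                    # batch size, time steps, vocabulary size
logits = torch.randn(B, T, V)         # unnormalised scores for every position
px = Categorical(logits=logits)       # a [B, T] batch of categoricals over V events
tokens = torch.randint(0, V, (B, T))  # token ids to score
print(px.log_prob(tokens).shape)      # torch.Size([2, 5]): one log-probability per position
```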
``` from torch.distributions import Categorical class LatentFactorModel(nn.Module): def __init__(self, vocab_size, emb_size, hidden_size, latent_size, pad_idx, dropout=0.): """ :param vocab_size: size of the vocabulary of the language :param emb_size: dimensionality of embeddings :param hidden_size: dimensionality of recurrent cell :param latent_size: this is D the dimensionality of the latent variable z :param pad_idx: the id reserved to the -PAD- token :param dropout: a dropout rate (you can ignore this for now) """ super().__init__() # Construct your NN blocks here, # remember to assign them to attributes of self pass def init_hidden(self, z): """ Returns the hidden state of the LSTM initialized with a projection of a given z. :param z: [B, K] :returns: [num_layers, B, H] hidden state, [num_layers, B, H] cell state """ pass def step(self, prev_x, z, hidden): """ Performs a single LSTM step for a given previous word and hidden state. Returns the unnormalized log probabilities (logits) over the vocabulary for this time step. :param prev_x: [B, 1] id of the previous token :param z: [B, K] latent variable :param hidden: hidden ([num_layers, B, H] state, [num_layers, B, H] cell) :returns: [B, V] logits, ([num_layers, B, H] updated state, [num_layers, B, H] updated cell) """ pass def forward(self, x, z) -> Categorical: """ Performs an entire forward pass given a sequence of words x and a z. This returns a collection of [B, T] categorical distributions, each with support over V events. :param x: [B, T] token ids :param z: [B, K] a latent sample :returns: Categorical object with shape [B,T,V] """ hidden = self.init_hidden(z) outputs = [] for t in range(x.size(1)): # [B, 1] prev_x = x[:, t].unsqueeze(-1) # logits: [B, V] logits, hidden = self.step(prev_x, z, hidden) outputs.append(logits) outputs = torch.cat(outputs, dim=1) return Categorical(logits=outputs) def loss(self, output_distributions, observations, pz, qz, free_nats=0., evaluation=False): """ Computes the terms in the loss (negative ELBO) given the output Categorical distributions, observations, the prior distribution p(z), and the approximate posterior distribution q(z|x). If free_nats is nonzero it will clamp the KL divergence between the posterior and prior to that value, preventing gradient propagation via the KL if it's below that value. If evaluation is set to true, the loss will be summed instead of averaged over the batch. Returns the (surrogate) loss, the ELBO, and the KL. :returns: surrogate loss (scalar), ELBO (scalar), KL (scalar) """ pass ``` The code below is used to assess the model and also investigate what it learned. We implemented it for you, so that you can focus on the VAE part. It's useful however to learn from this example: we do interesting things like computing perplexity and sampling novel words! # Evaluation metrics During training we'd like to keep track of some evaluation metrics on the validation data in order to keep track of how our model is doing and to perform early stopping. One simple metric we can compute is the ELBO on all the validation or test data using a single sample from the approximate posterior $Q(z|x, \lambda)$: ``` def eval_elbo(model, inference_model, eval_dataset, vocab, device, batch_size=128): """ Computes a single sample estimate of the ELBO on a given dataset. This returns both the average ELBO and the average KL (for inspection). """ dl = DataLoader(eval_dataset, batch_size=batch_size) sorted_dl = SortingTextDataLoader(dl) # Make sure the model is in evaluation mode (i.e. 
disable dropout). model.eval() total_ELBO = 0. total_KL = 0. num_words = 0 # We don't need to compute gradients for this. with torch.no_grad(): for words in sorted_dl: x_in, x_out, seq_mask, seq_len = create_batch(words, vocab, device) # Infer the approximate posterior and construct the prior. qz = inference_model(x_in, seq_mask, seq_len) pz = ProductOfBernoullis(torch.ones_like(qz.probs) * 0.5) # Compute the unnormalized probabilities using a single sample from the # approximate posterior. z = qz.sample() # Compute distributions X_i|z, x_{<i} px_z = model(x_in, z) # Compute the reconstruction loss and KL divergence. loss, ELBO, KL = model.loss(px_z, x_out, pz, qz, z, free_nats=0., evaluation=True) total_ELBO += ELBO total_KL += KL num_words += x_in.size(0) # Return the average reconstruction loss and KL. avg_ELBO = total_ELBO / num_words avg_KL = total_KL / num_words return avg_ELBO, avg_KL dummy_lm = LatentFactorModel( vocab.size(), emb_size=64, hidden_size=128, latent_size=16, pad_idx=pad_idx).to(device=device) !head -n 128 {val_file} > ./dummy_dataset dummy_data = TextDataset('./dummy_dataset') dummy_ELBO, dummy_kl = eval_elbo(dummy_lm, dummy_inference_model, dummy_data, vocab, device) print(dummy_ELBO, dummy_kl) assert dummy_kl.item() > 0 ``` A common metric to evaluate language models is the perplexity per word. The perplexity per word for a dataset is defined as: \begin{align} \text{ppl}(\mathcal{D}|\theta, \lambda) = \exp\left(-\frac{1}{\sum_{k=1}^{|\mathcal D|} n^{(k)}} \sum_{k=1}^{|\mathcal{D}|} \log P(x^{(k)}|\theta, \lambda)\right) \end{align} where $n^{(k)} = |x^{(k)}|$ is the number of tokens in a word and $P(x^{(k)}|\theta, \lambda)$ is the probability that our model assigns to the datapoint $x^{(k)}$. In order to compute $\log P(x|\theta, \lambda)$ for our model we need to evaluate the marginal: \begin{align} P(x|\theta, \lambda) = \sum_{z \in \{0, 1\}^K} P(x|z,\theta) P(z|\alpha) \end{align} As this is summation cannot be computed in a reasonable amount of time (due to exponential complexity), we have two options: we can use the earlier derived lower-bound on the log-likelihood, which will give us an upper-bound on the perplexity, or we can make an importance sampling estimate using our approximate posterior distribution. The importance sampling (IS) estimate can be done as: \begin{align} \hat P(x|\theta, \lambda) &\overset{\text{IS}}{\approx} \frac{1}{S} \sum_{s=1}^{S} \frac{P(z^{(s)}|\alpha)P(x|z^{(s)}, \theta)}{Q(z^{(s)}|x)} & \text{where }z^{(s)} \sim Q(z|x) \end{align} where $S$ is the number of samples. Then our perplexity becomes: \begin{align} &\frac{1}{\sum_{k=1}^{|\mathcal D|} n^{(k)}} \sum_{k=1}^{|\mathcal D|} \log P(x^{(k)}|\theta) \\ &\approx \frac{1}{\sum_{k=1}^{|\mathcal D|} n^{(k)}} \sum_{k=1}^{|\mathcal D|} \log \frac{1}{S} \sum_{s=1}^{S} \frac{P(z^{(s)}|\alpha)P(x^{(k)}|z^{(s)}, \theta)}{Q(z^{(s)}|x^{(k)})} \\ \end{align} We define the function `eval_perplexity` below that implements this importance sampling estimate: ``` def eval_perplexity(model, inference_model, eval_dataset, vocab, device, n_samples, batch_size=128): """ Estimates the per-word perplexity using importance sampling with the given number of samples. """ dl = DataLoader(eval_dataset, batch_size=batch_size) sorted_dl = SortingTextDataLoader(dl) # Make sure the model is in evaluation mode (i.e. disable dropout). model.eval() log_px = 0. num_predictions = 0 num_words = 0 # We don't need to compute gradients for this. 
with torch.no_grad(): for words in sorted_dl: x_in, x_out, seq_mask, seq_len = create_batch(words, vocab, device) # Infer the approximate posterior and construct the prior. qz = inference_model(x_in, seq_mask, seq_len) pz = ProductOfBernoullis(torch.ones_like(qz.probs) * 0.5) # TODO different prior # Create an array to hold all samples for this batch. batch_size = x_in.size(0) log_px_samples = torch.zeros(n_samples, batch_size) # Sample log P(x) n_samples times. for s in range(n_samples): # Sample a z^s from the posterior. z = qz.sample() # Compute log P(x^k|z^s) px_z = model(x_in, z) # [B, T] cond_log_prob = px_z.log_prob(x_out) cond_log_prob = torch.where(seq_mask, cond_log_prob, torch.zeros_like(cond_log_prob)) # [B] cond_log_prob = cond_log_prob.sum(-1) # Compute log p(z^s) and log q(z^s|x^k) prior_log_prob = pz.log_prob(z) # B posterior_log_prob = qz.log_prob(z) # B # Store the sample for log P(x^k) importance weighted with p(z^s)/q(z^s|x^k). log_px_sample = cond_log_prob + prior_log_prob - posterior_log_prob log_px_samples[s] = log_px_sample # Average over the number of samples and count the number of predictions made this batch. log_px_batch = torch.logsumexp(log_px_samples, dim=0) - \ torch.log(torch.Tensor([n_samples])) log_px += log_px_batch.sum() num_predictions += seq_len.sum() num_words += seq_len.size(0) # Compute and return the perplexity per word. perplexity = torch.exp(-log_px / num_predictions) NLL = -log_px / num_words return perplexity, NLL ``` Lastly, we want to occasionally qualitatively see the performance of the model during training, by letting it reconstruct a given word from the latent space. This gives us an idea of whether the model is using the latent space to encode some semantics about the data. For this we use a deterministic greedy decoding algorithm, that chooses the word with maximum probability at every time step, and feeds that word into the next time step. ``` def greedy_decode(model, z, vocab, max_len=50): """ Greedily decodes a word from a given z, by picking the word with maximum probability at each time step. """ # Disable dropout. model.eval() # Don't compute gradients. with torch.no_grad(): batch_size = z.size(0) # We feed the model the start-of-word symbol at the first time step. prev_x = torch.ones(batch_size, 1, dtype=torch.long).fill_(vocab[SOW_TOKEN]).to(z.device) # Initialize the hidden state from z. hidden = model.init_hidden(z) predictions = [] for t in range(max_len): logits, hidden = model.step(prev_x, z, hidden) # Choose the argmax of the unnnormalized probabilities as the # prediction for this time step. prediction = torch.argmax(logits, dim=-1) predictions.append(prediction) prev_x = prediction.view(batch_size, 1) return torch.cat(predictions, dim=1) ``` # Training Now it's time to train the model. We use early stopping on the validation perplexity for model selection. ``` # Define the model hyperparameters. emb_size = 256 hidden_size = 256 latent_size = 16 bidirectional_encoder = True free_nats = 0 # 5. annealing_steps = 0 # 11400 dropout = 0.6 word_dropout = 0 # 0.75 batch_size = 64 learning_rate = 0.001 num_epochs = 20 n_importance_samples = 3 # 50 # Create the training data loader. dl = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) sorted_dl = SortingTextDataLoader(dl) # Create the generative model. 
model = LatentFactorModel(vocab_size=vocab.size(), emb_size=emb_size, hidden_size=hidden_size, latent_size=latent_size, pad_idx=vocab[PAD_TOKEN], dropout=dropout) model = model.to(device) # Create the inference model. inference_model = InferenceModel(vocab_size=vocab.size(), embedder=model.embedder, hidden_size=hidden_size, latent_size=latent_size, pad_idx=vocab[PAD_TOKEN], bidirectional=bidirectional_encoder) inference_model = inference_model.to(device) # Create the optimizer. optimizer = optim.Adam(itertools.chain(model.parameters(), inference_model.parameters()), lr=learning_rate) # Save the best model (early stopping). best_model = "./best_model.pt" best_val_ppl = float("inf") best_epoch = 0 # Keep track of some statistics to plot later. train_ELBOs = [] train_KLs = [] val_ELBOs = [] val_KLs = [] val_perplexities = [] val_NLLs = [] step = 0 training_ELBO = 0. training_KL = 0. num_batches = 0 for epoch_num in range(1, num_epochs+1): for words in sorted_dl: # Make sure the model is in training mode (for dropout). model.train() # Transform the words to input, output, seq_len, seq_mask batches. x_in, x_out, seq_mask, seq_len = create_batch(words, vocab, device, word_dropout=word_dropout) # Compute the multiplier for the KL term if we do annealing. if annealing_steps > 0: KL_weight = min(1., (1.0 / annealing_steps) * step) else: KL_weight = 1. # Do a forward pass through the model and compute the training loss. We use # a reparameterized sample from the approximate posterior during training. qz = inference_model(x_in, seq_mask, seq_len) pz = ProductOfBernoullis(torch.ones_like(qz.probs) * 0.5) z = qz.sample() px_z = model(x_in, z) loss, ELBO, KL = model.loss(px_z, x_out, pz, qz, z, free_nats=free_nats) # Backpropagate and update the model weights. loss.backward() optimizer.step() optimizer.zero_grad() # Update some statistics to track for the training loss. training_ELBO += ELBO training_KL += KL num_batches += 1 # Every 100 steps we evaluate the model and report progress. if step % 100 == 0: val_ELBO, val_KL = eval_elbo(model, inference_model, val_dataset, vocab, device) print("(%d) step %d: training ELBO (KL) = %.2f (%.2f) --" " KL weight = %.2f --" " validation ELBO (KL) = %.2f (%.2f)" % (epoch_num, step, training_ELBO/num_batches, training_KL/num_batches, KL_weight, val_ELBO, val_KL)) # Update some statistics for plotting later. train_ELBOs.append((step, (training_ELBO/num_batches).item())) train_KLs.append((step, (training_KL/num_batches).item())) val_ELBOs.append((step, val_ELBO.item())) val_KLs.append((step, val_KL.item())) # Reset the training statistics. training_ELBO = 0. training_KL = 0. num_batches = 0 step += 1 # After an epoch we'll compute validation perplexity and save the model # for early stopping if it's better than previous models. print("Finished epoch %d" % (epoch_num)) val_perplexity, val_NLL = eval_perplexity(model, inference_model, val_dataset, vocab, device, n_importance_samples) val_ELBO, val_KL = eval_elbo(model, inference_model, val_dataset, vocab, device) # Keep track of the validation perplexities / NLL. val_perplexities.append((epoch_num, val_perplexity.item())) val_NLLs.append((epoch_num, val_NLL.item())) # If validation perplexity is better, store this model for early stopping. if val_perplexity < best_val_ppl: best_val_ppl = val_perplexity best_epoch = epoch_num torch.save(model.state_dict(), best_model) # Print epoch statistics. 
print("Evaluation epoch %d:\n" " - validation perplexity: %.2f\n" " - validation NLL: %.2f\n" " - validation ELBO (KL) = %.2f (%.2f)" % (epoch_num, val_perplexity, val_NLL, val_ELBO, val_KL)) # Also show some qualitative results by reconstructing a word from the # validation data. Use the mean of the approximate posterior and greedy # decoding. random_word = val_dataset[np.random.choice(len(val_dataset))] x_in, _, seq_mask, seq_len = create_batch([random_word], vocab, device) qz = inference_model(x_in, seq_mask, seq_len) z = qz.mean() reconstruction = greedy_decode(model, z, vocab) reconstruction = batch_to_words(reconstruction, vocab)[0] print("-- Original word: \"%s\"" % random_word) print("-- Model reconstruction: \"%s\"" % reconstruction) ``` # Let's plot the training and validation statistics: ``` steps, training_ELBO = list(zip(*train_ELBOs)) _, training_KL = list(zip(*train_KLs)) _, val_ELBO = list(zip(*val_ELBOs)) _, val_KL = list(zip(*val_KLs)) epochs, val_ppl = list(zip(*val_perplexities)) _, val_NLL = list(zip(*val_NLLs)) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18, 5)) # Plot training ELBO and KL ax1.set_title("Training ELBO") ax1.plot(steps, training_ELBO, "-o") ax2.set_title("Training KL") ax2.plot(steps, training_KL, "-o") plt.show() # Plot validation ELBO and KL fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18, 5)) ax1.set_title("Validation ELBO") ax1.plot(steps, val_ELBO, "-o", color="orange") ax2.set_title("Validation KL") ax2.plot(steps, val_KL, "-o", color="orange") plt.show() # Plot validation perplexities. fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18, 5)) ax1.set_title("Validation perplexity") ax1.plot(epochs, val_ppl, "-o", color="orange") ax2.set_title("Validation NLL") ax2.plot(epochs, val_NLL, "-o", color="orange") plt.show() print() ``` Let's load the best model according to validation perplexity and compute its perplexity on the test data: ``` # Load the best model from disk. model = LatentFactorModel(vocab_size=vocab.size(), emb_size=emb_size, hidden_size=hidden_size, latent_size=latent_size, pad_idx=vocab[PAD_TOKEN], dropout=dropout) model.load_state_dict(torch.load(best_model)) model = model.to(device) # Compute test perplexity and ELBO. test_perplexity, test_NLL = eval_perplexity(model, inference_model, test_dataset, vocab, device, n_importance_samples) test_ELBO, test_KL = eval_elbo(model, inference_model, test_dataset, vocab, device) print("test ELBO (KL) = %.2f (%.2f) -- test perplexity = %.2f -- test NLL = %.2f" % (test_ELBO, test_KL, test_perplexity, test_NLL)) ``` # Qualitative analysis Let's have a look at what how our trained model interacts with the learned latent space. First let's greedily decode some samples from the prior to assess the diversity of the model: ``` # Generate 10 samples from the standard normal prior. num_prior_samples = 10 pz = ProductOfBernoullis(torch.ones(num_prior_samples, latent_size) * 0.5) z = pz.sample() z = z.to(device) # Use the greedy decoding algorithm to generate words. predictions = greedy_decode(model, z, vocab) predictions = batch_to_words(predictions, vocab) for num, prediction in enumerate(predictions): print("%d: %s" % (num+1, prediction)) ``` Let's now have a look how good the model is at reconstructing words from the test dataset using the approximate posterior mean and a couple of samples: ``` # Pick a random test word. test_word = test_dataset[np.random.choice(len(test_dataset))] # Infer q(z|x). 
x_in, _, seq_mask, seq_len = create_batch([test_word], vocab, device) qz = inference_model(x_in, seq_mask, seq_len) # Decode using the mean. z_mean = qz.mean() mean_reconstruction = greedy_decode(model, z_mean, vocab) mean_reconstruction = batch_to_words(mean_reconstruction, vocab)[0] print("Original: \"%s\"" % test_word) print("Posterior mean reconstruction: \"%s\"" % mean_reconstruction) # Decode a couple of samples from the approximate posterior. for s in range(3): z = qz.sample() sample_reconstruction = greedy_decode(model, z, vocab) sample_reconstruction = batch_to_words(sample_reconstruction, vocab)[0] print("Posterior sample reconstruction (%d): \"%s\"" % (s+1, sample_reconstruction)) ``` We can also qualitatively assess the smoothness of the learned latent space by interpolating between two words in the test set: ``` # Pick a random test word. test_word_1 = test_dataset[np.random.choice(len(test_dataset))] # Infer q(z|x). x_in, _, seq_mask, seq_len = create_batch([test_word_1], vocab, device) qz = inference_model(x_in, seq_mask, seq_len) qz_1 = qz.mean() # Pick a random second test word. test_word_2 = test_dataset[np.random.choice(len(test_dataset))] # Infer q(z|x) again. x_in, _, seq_mask, seq_len = create_batch([test_word_2], vocab, device) qz = inference_model(x_in, seq_mask, seq_len) qz_2 = qz.mean() # Now interpolate between the two means and generate words between those. num_words = 5 print("Word 1: \"%s\"" % test_word_1) for alpha in np.linspace(start=0., stop=1., num=num_words): z = (1-alpha) * qz_1 + alpha * qz_2 reconstruction = greedy_decode(model, z, vocab) reconstruction = batch_to_words(reconstruction, vocab)[0] print("(1-%.2f) * qz1.mean + %.2f qz2.mean: \"%s\"" % (alpha, alpha, reconstruction)) print("Word 2: \"%s\"" % test_word_2) ```
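Because the latent space here is a bit vector rather than a continuous code, another way to look at the same question is to walk from one word's code to the other by flipping one differing bit at a time. A sketch, assuming the variables from the cell above (`qz_1`, `qz_2`, `model`, `vocab`) are still in scope and that the posterior means are `[1, K]` tensors:

```
z1 = (qz_1 > 0.5).float()   # round the two posterior means to bit vectors
z2 = (qz_2 > 0.5).float()
differing_bits = (z1 != z2).squeeze(0).nonzero(as_tuple=False).flatten()

z = z1.clone()
print("start: \"%s\"" % batch_to_words(greedy_decode(model, z, vocab), vocab)[0])
for k in differing_bits.tolist():
    z[0, k] = z2[0, k]      # flip one bit towards the second word's code
    word = batch_to_words(greedy_decode(model, z, vocab), vocab)[0]
    print("flip bit %d -> \"%s\"" % (k, word))
```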
# AU Fundamentals of Python Programming-W10X

## Topic 1 - Strings and the parameters of print()

### Step 1: Hello World with extra parameters

sep = "..." sets the separator between printed values; end="" sets what is printed at the end.

* sep: string inserted between values, default a space.
* end: string appended after the last value, default a newline.

```
print('Hello World!') #'Hello World!' is the same as "Hello World!"
help(print) # comments are not executed
print('Hello '+'World!')
print("Hello","World", sep="+")
print("Hello"); print("World!")
print("Hello", end=' ');print("World")
```

### Step 2: Escape Sequences

* \newline Ignored
* \\ Backslash (\)
* \' Single quote (')
* \" Double quote (")
* \a ASCII Bell (BEL)
* \b ASCII Backspace (BS)
* \n ASCII Linefeed (LF)
* \r ASCII Carriage Return (CR)
* \t ASCII Horizontal Tab (TAB)
* \ooo ASCII character with octal value ooo
* \xhh... ASCII character with hex value hh...

```
print("Hello\nWorld!")
print("Hello","World!", sep="\n")
txt = "We are the so-called \"Vikings\" from the north."
print(txt)
```

### Step 3: Use a backslash at the end of each line to build a long string

```
iPhone11='The iPhone 11 is a smartphone designed and sold by Apple, the 13th generation of the iPhone line and the successor to the iPhone XR. \
It was unveiled on 10 September 2019 by CEO Tim Cook at the Steve Jobs Theater in Apple Park, together with the iPhone 11 Pro and iPhone 11 Pro Max, \
and went on sale in most of the world on 20 September 2019. It uses a glass and aluminium design similar to the iPhone XR, \
has a 6.1-inch Liquid Retina HD display with Face ID, and runs on the Apple-designed A13 Bionic chip \
with a third-generation Neural Engine. The device is splash, water and dust resistant and can stay as deep as 2 metres under water for up to 30 minutes.'
print(iPhone11)
```

### Step 4: Use triple quotes to build a long string, ''' ... ''' or """ ... """

```
iPhone11='''
The iPhone 11 is a smartphone designed and sold by Apple, the 13th generation of the iPhone line and the successor to the iPhone XR.
It was unveiled on 10 September 2019 by CEO Tim Cook at the Steve Jobs Theater in Apple Park, together with the iPhone 11 Pro and iPhone 11 Pro Max,
and went on sale in most of the world on 20 September 2019. It uses a glass and aluminium design similar to the iPhone XR,
has a 6.1-inch Liquid Retina HD display with Face ID, and runs on the Apple-designed A13 Bionic chip
with a third-generation Neural Engine. The device is splash, water and dust resistant and can stay as deep as 2 metres under water for up to 30 minutes.'''
print(iPhone11)

iPhone11="""
The iPhone 11 is a smartphone designed and sold by Apple, the 13th generation of the iPhone line and the successor to the iPhone XR.
It was unveiled on 10 September 2019 by CEO Tim Cook at the Steve Jobs Theater in Apple Park, together with the iPhone 11 Pro and iPhone 11 Pro Max,
and went on sale in most of the world on 20 September 2019. It uses a glass and aluminium design similar to the iPhone XR,
has a 6.1-inch Liquid Retina HD display with Face ID, and runs on the Apple-designed A13 Bionic chip
with a third-generation Neural Engine. The device is splash, water and dust resistant and can stay as deep as 2 metres under water for up to 30 minutes."""
print(iPhone11)
```

## Topic 2 - Type-conversion functions

### Step 5: Reading input into a variable

```
name = input('Please input your name:')
print('Hello, ', name)
print(type(name)) # print the variable's type
```

### Step 6: Python type-conversion functions

* int() # convert to an integer
* float() # convert to a float
* str() # convert to a string
* variable = int(string_variable)
* variable = str(numeric_variable)

```
# variable declarations
varA = 66 # declare an integer variable
varB = 1.68 # declare a variable with a fractional part (a float)
varC = 'GoPython' # declare a string variable
varD = str(varA) # convert the integer 66 into the string '66'
varE = str(varB) # convert the float 1.68 into the string '1.68'
varF = int('2019') # convert the string '2019' into the integer 2019
varG = float('3.14') # convert the string '3.14' into the float 3.14

score = input('Please input your score:')
score = int(score)
print(type(score)) # print the variable's type
```

## Topic 3 - Indexing and slicing

```
a = "Hello, World!"
print(a[1]) #Indexing
print(a[2:5]) #Slicing
```

### Step 8: Indexing

```
a = "Hello Wang"
d = "0123456789"
print(a[3]) #Indexing

a = "Hello Wang"
d = "0123456789"
print(a[-3]) #Negative Indexing
```

### Step 9: Slicing

```
a = "Hello Wang"
d = "0123456789"
print(a[2:5]) #Slicing

a = "Hello Wang"
d = "0123456789"
print(a[2:]) #Slicing

a = "Hello Wang"
d = "0123456789"
print(a[:5]) #Slicing

a = "Hello Wang"
d = "0123456789"
print(a[-6:-2]) #Slicing

a = "Hello Wang"
d = "0123456789"
print(a[-4:]) #Slicing
```

## Topic 4 - Formatted output

```
A = 435; B = 59.058
print('Art: %5d, Price per Unit: %8.2f' % (A, B)) #%-formatting
print("Art: {0:5d}, Price per Unit: {1:8.2f}".format(A,B)) #str.format (Python 2.6+)
print(f"Art:{A:5d}, Price per Unit: {B:8.2f}") #f-string (Python 3.6+)
```

### Step 10: %-formatting

The % operator formats a group of values held in a tuple according to the given conversion specifiers, e.g. %s (string), %d (decimal integer), %f (float).

```
A = 435; B = 59.058
print('Art: %5d, Price per Unit: %8.2f' % (A, B))

FirstName = "Mary"; LastName= "Lin"
print("She is %s %s" %(FirstName, LastName))
```

### Step 11: str.format (Python 2.6+)

```
A = 435; B = 59.058
print("Art: {0:5d}, Price per Unit: {1:8.2f}".format(435, 59.058))

FirstName = "Mary"; LastName= "Lin"
print("She is {} {}".format(FirstName, LastName))
```

### Step 12: f-strings (Python 3.6+)

```
A = 435; B = 59.058
print(f"Art:{A:5d}, Price per Unit: {B:8.2f}")

FirstName = "Mary"; LastName= "Lin"
print(f"She is {FirstName} {LastName}")
```
github_jupyter
## Topic Modelling (joint plots by quality band) Shorter notebook just for Figures 9 and 10 in the paper. ``` %matplotlib inline import matplotlib.pyplot as plt # magics and warnings %load_ext autoreload %autoreload 2 import warnings; warnings.simplefilter('ignore') import os, random from tqdm import tqdm import pandas as pd import numpy as np seed = 43 random.seed(seed) np.random.seed(seed) import nltk, gensim, sklearn, spacy from gensim.models import CoherenceModel import matplotlib.pyplot as plt import pyLDAvis.gensim import seaborn as sns sns.set(style="white") ``` ### Load the dataset Created with the main Topic Modelling notebook. ``` bands_data = {x:dict() for x in range(1,5)} import pickle, os for band in range(1,5): with open("trove_overproof/models/hum_band_%d.pkl"%band, 'rb') as handle: bands_data[band]["model_human"] = pickle.load(handle) with open("trove_overproof/models/corpus_hum_band_%d.pkl"%band, 'rb') as handle: bands_data[band]["corpus_human"] = pickle.load(handle) with open("trove_overproof/models/dictionary_hum_band_%d.pkl"%band, 'rb') as handle: bands_data[band]["dictionary_human"] = pickle.load(handle) with open("trove_overproof/models/ocr_band_%d.pkl"%band, 'rb') as handle: bands_data[band]["model_ocr"] = pickle.load(handle) with open("trove_overproof/models/corpus_ocr_band_%d.pkl"%band, 'rb') as handle: bands_data[band]["corpus_ocr"] = pickle.load(handle) with open("trove_overproof/models/dictionary_ocr_band_%d.pkl"%band, 'rb') as handle: bands_data[band]["dictionary_ocr"] = pickle.load(handle) ``` ### Evaluation #### Intrinsic eval See http://qpleple.com/topic-coherence-to-evaluate-topic-models. ``` for band in range(1,5): print("Quality band",band) # Human # Compute Perplexity print('\nPerplexity (Human): ', bands_data[band]["model_human"].log_perplexity(bands_data[band]["corpus_human"])) # a measure of how good the model is. The lower the better. # Compute Coherence Score coherence_model_lda = CoherenceModel(model=bands_data[band]["model_human"], corpus=bands_data[band]["corpus_human"], dictionary=bands_data[band]["dictionary_human"], coherence='u_mass') coherence_lda = coherence_model_lda.get_coherence() print('\nCoherence Score (Human): ', coherence_lda) # OCR # Compute Perplexity print('\nPerplexity (OCR): ', bands_data[band]["model_ocr"].log_perplexity(bands_data[band]["corpus_ocr"])) # a measure of how good the model is. The lower the better. # Compute Coherence Score coherence_model_lda = CoherenceModel(model=bands_data[band]["model_ocr"], corpus=bands_data[band]["corpus_ocr"], dictionary=bands_data[band]["dictionary_ocr"], coherence='u_mass') coherence_lda = coherence_model_lda.get_coherence() print('\nCoherence Score (OCR): ', coherence_lda) print("==========\n") ``` #### Match of topics We match every topic in the OCR model with a topic in the human model (by best matching), and assess the overall distance between the two using the weighted total distance over a set of N top words (from the human model to the ocr model). The higher this value, the closest two topics are. Note that to find a matching, we create a weighted network and find the maximal bipartite matching using NetworkX. Afterwards, we can measure the distance of the best match, e.g., using the KL divergence (over the same set of words). 
``` import networkx as nx from scipy.stats import entropy from collections import defaultdict # analyse matches distances = {x:list() for x in range(1,5)} n_words_in_common = {x:list() for x in range(1,5)} matches = {x:defaultdict(int) for x in range(1,5)} top_n = 500 for band in range(1,5): G = nx.Graph() model_human = bands_data[band]["model_human"] model_ocr = bands_data[band]["model_ocr"] # add bipartite nodes G.add_nodes_from(['h_'+str(t_h[0]) for t_h in model_human.show_topics(num_topics = -1, formatted=False, num_words=1)], bipartite=0) G.add_nodes_from(['o_'+str(t_o[0]) for t_o in model_ocr.show_topics(num_topics = -1, formatted=False, num_words=1)], bipartite=1) # add weighted edges for t_h in model_human.show_topics(num_topics = -1, formatted=False, num_words=top_n): for t_o in model_ocr.show_topics(num_topics = -1, formatted=False, num_words=top_n): # note that the higher the weight, the shorter the distance between the two distributions, so we do 1-weight to then do minimal matching words_of_h = [x[0] for x in t_h[1]] words_of_o = [x[0] for x in t_o[1]] weights_of_o = {x[0]:x[1] for x in t_o[1]} words_in_common = list(set(words_of_h).intersection(set(words_of_o))) # sum the weighted joint probability of every shared word in the two models avg_weight = 1 - sum([x[1]*weights_of_o[x[0]] for x in t_h[1] if x[0] in words_in_common]) G.add_edge('h_'+str(t_h[0]),'o_'+str(t_o[0]),weight=avg_weight) G.add_edge('o_'+str(t_o[0]),'h_'+str(t_h[0]),weight=avg_weight) bipartite_solution = nx.bipartite.matching.minimum_weight_full_matching(G) # calculate distances for match_h,match_o in bipartite_solution.items(): if match_h.startswith('o'): # to avoid repeating the matches (complete graph!) break matches[band][int(match_h.split("_")[1])] = int(match_o.split("_")[1]) m_h = model_human.show_topic(int(match_h.split("_")[1]), topn=top_n) m_o = model_ocr.show_topic(int(match_o.split("_")[1]), topn=top_n) weights_of_o = {x[0]:x[1] for x in m_o} words_of_h = [x[0] for x in m_h] words_of_o = [x[0] for x in m_o] words_in_common = list(set(words_of_h).intersection(set(words_of_o))) n_words_in_common[band].append(len(words_in_common)/top_n) dist_h = list() dist_o = list() for w in m_h: if w[0] in words_in_common: dist_h.append(w[1]) dist_o.append(weights_of_o[w[0]]) # normalize dist_h = dist_h/sum(dist_h) dist_o = dist_o/sum(dist_o) dist = entropy(dist_h,dist_o) distances[band].append(dist) sns.set_context("notebook", font_scale=1.2, rc={"lines.linewidth": 2.5}) # Figure 9 for band in range(1,5): sns.distplot(distances[band], hist=False, label="Quality band %d"%band) plt.xlim((0,1)) plt.xlabel("KL divergence between topics, V=%d."%top_n) plt.tight_layout() plt.savefig("figures/topic_modelling/KL_divergence_topics.pdf") # Figure 10 for band in range(1,5): sns.distplot(n_words_in_common[band], hist=False, label="Quality band %d"%band) plt.xlim((0,1)) plt.tight_layout() plt.savefig("figures/topic_modelling/Words_in_common_topics.pdf") ```
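As a possible follow-up (not in the original notebook), the matching computed above can be inspected directly by listing, for one quality band, each human topic, its matched OCR topic and the corresponding KL divergence. This assumes the `matches` dictionary preserves the insertion order used when `distances` was filled (true for Python 3.7+ dicts).
```
# Added sketch: inspect the topic matching for one quality band.
band = 1  # any band in 1..4
for k, (h_topic, o_topic) in enumerate(matches[band].items()):
    human_words = [w for w, _ in bands_data[band]["model_human"].show_topic(h_topic, topn=10)]
    ocr_words = [w for w, _ in bands_data[band]["model_ocr"].show_topic(o_topic, topn=10)]
    print("human %d <-> ocr %d  KL=%.3f" % (h_topic, o_topic, distances[band][k]))
    print("  human:", ", ".join(human_words))
    print("  ocr:  ", ", ".join(ocr_words))
```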
github_jupyter
+ This notebook is part of lecture 7 *Solving Ax=0, pivot variables, and special solutions* in the OCW MIT course 18.06 by Prof Gilbert Strang [1] + Created by me, Dr Juan H Klopper + Head of Acute Care Surgery + Groote Schuur Hospital + University Cape Town + <a href="mailto:juan.klopper@uct.ac.za">Email me with your thoughts, comments, suggestions and corrections</a> <a rel="license" href="http://creativecommons.org/licenses/by-nc/4.0/"><img alt="Creative Commons Licence" style="border-width:0" src="https://i.creativecommons.org/l/by-nc/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" href="http://purl.org/dc/dcmitype/InteractiveResource" property="dct:title" rel="dct:type">Linear Algebra OCW MIT18.06</span> <span xmlns:cc="http://creativecommons.org/ns#" property="cc:attributionName">IPython notebook [2] study notes by Dr Juan H Klopper</span> is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc/4.0/">Creative Commons Attribution-NonCommercial 4.0 International License</a>. + [1] <a href="http://ocw.mit.edu/courses/mathematics/18-06sc-linear-algebra-fall-2011/index.htm">OCW MIT 18.06</a> + [2] Fernando Pérez, Brian E. Granger, IPython: A System for Interactive Scientific Computing, Computing in Science and Engineering, vol. 9, no. 3, pp. 21-29, May/June 2007, doi:10.1109/MCSE.2007.53. URL: http://ipython.org ``` from IPython.core.display import HTML, Image css_file = 'style.css' HTML(open(css_file, 'r').read()) #import numpy as np from sympy import init_printing, Matrix, symbols #import matplotlib.pyplot as plt #import seaborn as sns #from IPython.display import Image from warnings import filterwarnings init_printing(use_latex = 'mathjax') %matplotlib inline filterwarnings('ignore') ``` # Solving homogeneous systems # Pivot variables # Special solutions * We are trying to solve a system of linear equations * For homogeneous systems the right-hand side is the zero vector * Consider the example below ``` A = Matrix([[1, 2, 2, 2], [2, 4, 6, 8], [3, 6, 8, 10]]) A # A 3x4 matrix x1, x2, x3, x4 = symbols('x1, x2, x3, x4') x_vect = Matrix([x1, x2, x3, x4]) # A 4x1 matrix x_vect b = Matrix([0, 0, 0]) b # A 3x1 matrix ``` * The **x** column vector is a set of all the solutions to this homogeneous equation * It forms the nullspace * Note that the column vectors in A are not linearly independent * Performing elementary row operations leaves us with the matrix below * It has two pivots, which is termed **rank** 2 ``` A.rref() # rref being reduced row echelon form ``` * Which represents the following $$ { x }_{ 1 }\begin{bmatrix} 1 \\ 0 \\ 0 \end{bmatrix}+{ x }_{ 2 }\begin{bmatrix} 2 \\ 0 \\ 0 \end{bmatrix}+{ x }_{ 3 }\begin{bmatrix} 0 \\ 1 \\ 0 \end{bmatrix}+{ x }_{ 4 }\begin{bmatrix} -2 \\ 2 \\ 0 \end{bmatrix}=\begin{bmatrix} 0 \\ 0 \\ 0 \end{bmatrix}\\ { x }_{ 1 }+2{ x }_{ 2 }+0{ x }_{ 3 }-2{ x }_{ 4 }=0\\ 0{ x }_{ 1 }+0{ x }_{ 2 }+{ x }_{ 3 }+2{ x }_{ 4 }=0\\ { x }_{ 1 }+0{ x }_{ 2 }+0{ x }_{ 3 }+0{ x }_{ 4 }=0 $$ * We are free set a value for *x*<sub>4</sub>, let's sat *t* $$ { x }_{ 1 }+2{ x }_{ 2 }+0{ x }_{ 3 }-2{ x }_{ 4 }=0\\ 0{ x }_{ 1 }+0{ x }_{ 2 }+{ x }_{ 3 }+2t=0\\ { x }_{ 1 }+0{ x }_{ 2 }+0{ x }_{ 3 }+0{ x }_{ 4 }=0\\ \therefore \quad { x }_{ 3 }=-2t $$ * We will have to make *x*<sub>2</sub> equal to another variable, say *s* $$ { x }_{ 1 }+2s+0{ x }_{ 3 }-2t=0 $$ $$ \therefore \quad {x}_{1}=2t-2s $$ * This results in the following, which is the complete nullspace and has dimension 2 $$ \begin{bmatrix} { x }_{ 1 } \\ { x }_{ 2 } \\ 
{ x }_{ 3 } \\ { x }_{ 4 } \end{bmatrix}=\begin{bmatrix} -2s+2t \\ s \\ -2t \\ t \end{bmatrix}=\begin{bmatrix} -2s \\ s \\ 0 \\ 0 \end{bmatrix}+\begin{bmatrix} 2t \\ 0 \\ -2t \\ t \end{bmatrix}=s\begin{bmatrix} -2 \\ 1 \\ 0 \\ 0 \end{bmatrix}+t\begin{bmatrix} 2 \\ 0 \\ -2 \\ 1 \end{bmatrix} $$ * From the above, we clearly have two vectors in the solution and we can take constant multiples of these to fill up our solution space (our nullspace) * We can easily calculate how many free variables we will have by subtracting the number of pivots (rank) from the number of variables (*x*) in **x** * Here we have 4 - 2 = 2 #### Example problem * Calculate **x** for the transpose of A above #### Solution ``` A_trans = A.transpose() # Creating a new matrix called A_trans and giving it the value of the transpose of A A_trans A_trans.rref() # In reduced row echelon form this would be the following matrix ``` * Remember this is 4 equations in 3 unknowns, i.e. $$ { x }_{ 1 }\begin{bmatrix} 1 \\ 0 \\ 0 \\ 0 \end{bmatrix}+{ x }_{ 2 }\begin{bmatrix} 0 \\ 1 \\ 0 \\ 0 \end{bmatrix}+{ x }_{ 3 }\begin{bmatrix} 1 \\ 1 \\ 0 \\ 0 \end{bmatrix}=\begin{bmatrix} 0 \\ 0 \\ 0 \\ 0 \end{bmatrix}\\ { x }_{ 1 }+0{ x }_{ 2 }+{ x }_{ 3 }=0\\ 0{ x }_{ 1 }+{ x }_{ 2 }+{ x }_{ 3 }=0\\ 0{ x }_{ 1 }+0{ x }_{ 2 }+0{ x }_{ 3 }=0\\ 0{ x }_{ 1 }+0{ x }_{ 2 }+0{ x }_{ 3 }=0 $$ * It seems we are free to choose a value for *x*<sub>3</sub> * Let's make it *t* $$ -t\begin{bmatrix} 1 \\ 0 \\ 0 \\ 0 \end{bmatrix}-t\begin{bmatrix} 0 \\ 1 \\ 0 \\ 0 \end{bmatrix}+t\begin{bmatrix} 1 \\ 1 \\ 0 \\ 0 \end{bmatrix}=\begin{bmatrix} 0 \\ 0 \\ 0 \\ 0 \end{bmatrix}\\ { x }_{ 3 }=t\\ { x }_{ 1 }+0{ x }_{ 2 }+t=0\\ 0{ x }_{ 1 }+{ x }_{ 2 }+t=0\\ \therefore \quad { x }_{ 2 }=-t\\ \therefore \quad { x }_{ 1 }=-t\\ \begin{bmatrix} { x }_{ 1 } \\ { x }_{ 2 } \\ { x }_{ 3 } \end{bmatrix}=\begin{bmatrix} -t \\ -t \\ t \end{bmatrix}=t\begin{bmatrix} -1 \\ -1 \\ 1 \end{bmatrix} $$ * We had *n* = 3 unknowns and *r* (rank) = 2 pivots * The solution set (nullspace) will thus have 1 free variable (*t*) (3-2=1) * The third column is the sum of the first two, so only 2 columns are linearly independent * We thus expect 2 pivots and can predict the nullspace to have only 1 free variable (i.e. it is one-dimensional)
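* As a quick cross-check (this cell is an addition, not part of the original lecture notes), SymPy can return the special solutions directly with the `nullspace()` method; the basis vectors should match the ones derived by hand above, up to scaling ``` # Added check: SymPy returns a basis of special solutions for the nullspace # For A we expect multiples of [-2, 1, 0, 0] and [2, 0, -2, 1] # For A_trans we expect a multiple of [-1, -1, 1] A.nullspace() A_trans.nullspace() ```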
github_jupyter
``` # !pip install simplejson from pymongo import MongoClient from pathlib import Path from tqdm.notebook import tqdm import numpy as np import simplejson as json import itertools from functools import cmp_to_key import networkx as nx from IPython.display import display, Image, JSON from ipywidgets import widgets, Image, HBox, VBox, Button, ButtonStyle, Layout, Box from lib.image_dedup import make_hashes, calculate_distance, hashes_diff from lib.PersistentSet import PersistentSet from lib.sort_things import post_score, sort_posts, sort_images from lib.parallel import parallel images_dir = Path('../images') handmade_dir = Path('./handmade') handmade_dir.mkdir(exist_ok=True) mongo_uri = json.load(open('./credentials/mongodb_credentials.json'))['uri'] mongo = MongoClient(mongo_uri) db = mongo['bad-vis'] posts = db['posts'] imagefiles = db['imagefiles'] imagemeta = db['imagemeta'] imagededup = db['imagededup'] imagededup.drop() for i in imagemeta.find(): imagededup.insert_one(i) ``` # Load image metadata ``` imageDedup = [m for m in imagemeta.find()] imageDedup.sort(key=lambda x: x['image_id']) phash_to_idx_mapping = {} for i in range(len(imageDedup)): phash = imageDedup[i]['phash'] l = phash_to_idx_mapping.get(phash, []) l.append(i) phash_to_idx_mapping[phash] = l def phash_to_idx (phash): return phash_to_idx_mapping.get(phash, None) image_id_to_idx_mapping = {imageDedup[i]['image_id']:i for i in range(len(imageDedup))} def image_id_to_idx (image_id): return image_id_to_idx_mapping.get(image_id, None) ``` # Calculate distance ## Hash distance ``` image_hashes = [make_hashes(m) for m in imageDedup] # distance = calculate_distance(image_hashes) distance = calculate_distance(image_hashes, hash_type='phash') # distance2 = np.ndarray([len(image_hashes), len(image_hashes)]) # for i in tqdm(range(len(image_hashes))): # for j in range(i+1): # diff = hashes_diff(image_hashes[i], image_hashes[j]) # distance2[i, j] = diff # distance2[j, i] = diff # np.array_equal(distance, distance2) # pdistance = calculate_distance(image_hashes, hash_type='phash') ``` ## Find duplicated pairs from distance matrix ``` def set_distance (hashes, value, mat=distance): phash_x = hashes[0] phash_y = phash_x if len(hashes) == 1 else hashes[1] idx_x = phash_to_idx(phash_x) idx_y = phash_to_idx(phash_y) if idx_x == None or idx_y == None: return for s in itertools.product(idx_x, idx_y): i, j = s mat[i, j] = value mat[j, i] = value def set_distance_pairs (phash_pairs, value, mat=distance): for p in phash_pairs: set_distance(list(p), value, mat=mat) auto_duplicated_image_phash_pairs = PersistentSet() auto_duplicated_image_phash_pairs.set_file(handmade_dir/'auto_duplicated_image_phash_pairs.json') for i in tqdm(range(distance.shape[0])): for j in range(i): if distance[i, j] <= 1: # checked, all distance <= 1 are duplicated auto_duplicated_image_phash_pairs.add(frozenset([imageDedup[i]['phash'], imageDedup[j]['phash']])) # for i in tqdm(range(pdistance.shape[0])): # for j in range(i): # if pdistance[i, j] <= 1: # checked, all distance <= 1 are duplicated # auto_duplicated_image_phash_pairs.add(frozenset([imageDedup[i]['phash'], imageDedup[j]['phash']])) auto_duplicated_image_phash_pairs.save() ``` ## Apply information from meta data ``` duplicated_post_image_phash_pairs = PersistentSet() duplicated_post_image_phash_pairs.set_file(handmade_dir/'duplicated_post_image_phash_pairs.json') for p in tqdm(posts.find()): if len(p.get('duplicated_posts', [])) == 0: continue dp_phashes = {i['phash'] for dp in p['duplicated_posts'] for i in 
imagemeta.find({'post_id': dp})} if len(dp_phashes) > 1: # print(f"More than 1 dp image {p['post_id']}") # print(f"{p['duplicated_posts']} {dp_phashes}") continue phashes = [i['phash'] for i in imagemeta.find({'post_id': p['post_id']})] if len(phashes) > 1: # print(f"More than 1 image {p['post_id']} {phashes}") continue for s in itertools.product(dp_phashes, phashes): fs = frozenset(s) if len(fs) > 1: duplicated_post_image_phash_pairs.add(fs) duplicated_post_image_phash_pairs.save() related_album_image_phash_pairs = PersistentSet() related_album_image_phash_pairs.set_file(handmade_dir/'related_album_image_phash_pairs.json') for album in tqdm({i['album'] for i in imagemeta.find({'album': {'$exists': True, '$ne': ''}})}): ra_phashes = [i['phash'] for i in imagemeta.find({'album': album})] if len(ra_phashes) <= 1: print(f"Only 1 or less image {album} {ra_phashes}") for s in itertools.product(ra_phashes, ra_phashes): fs = frozenset(s) if len(fs) > 1: related_album_image_phash_pairs.add(fs) related_album_image_phash_pairs.save() ``` ## Apply manual labeled data ``` duplicated_image_phash_pairs = PersistentSet.load_set(handmade_dir/'duplicated_image_phash_pairs.json') not_duplicated_image_phash_pairs = PersistentSet.load_set(handmade_dir/'not_duplicated_image_phash_pairs.json') related_image_phash_pairs = PersistentSet.load_set(handmade_dir/'related_image_phash_pairs.json') invalid_image_phashes = PersistentSet.load_set(handmade_dir/'invalid_image_phashes.json') set_distance_pairs(auto_duplicated_image_phash_pairs, 0) set_distance_pairs(duplicated_post_image_phash_pairs, 0) set_distance_pairs(duplicated_image_phash_pairs, 0) set_distance_pairs(not_duplicated_image_phash_pairs, 60) set_distance_pairs(related_album_image_phash_pairs, 60) set_distance_pairs(related_image_phash_pairs, 60) related_distance = np.full(distance.shape, 60) set_distance_pairs(related_album_image_phash_pairs, 0, mat=related_distance) set_distance_pairs(related_image_phash_pairs, 0, mat=related_distance) ``` # Human in the Loop ``` def make_dedup_box (idx_x, idx_y, default=None): image_x = imageDedup[idx_x] phash_x = image_x['phash'] image_y = imageDedup[idx_y] phash_y = image_y['phash'] hash_pair = frozenset([phash_x, phash_y]) yes_btn = widgets.Button(description="Duplicated", button_style='success') no_btn = widgets.Button(description="Not", button_style='info') related_btn = widgets.Button(description="Related", button_style='warning') invalid_x_btn = widgets.Button(description="X Invalid") invalid_y_btn = widgets.Button(description="Y Invalid") reset_btn = widgets.Button(description="Reset") output = widgets.Output() def on_yes (btn): with output: if hash_pair in not_duplicated_image_phash_pairs: not_duplicated_image_phash_pairs.persist_remove(hash_pair) print('-Not') duplicated_image_phash_pairs.persist_add(hash_pair) print('Duplicated') def on_no (btn): with output: if hash_pair in duplicated_image_phash_pairs: duplicated_image_phash_pairs.persist_remove(hash_pair) print('-Duplicated') not_duplicated_image_phash_pairs.persist_add(hash_pair) print('Not') def on_related (btn): with output: if hash_pair in not_duplicated_image_phash_pairs: not_duplicated_image_phash_pairs.persist_remove(hash_pair) print('-Not') related_image_phash_pairs.persist_add(hash_pair) print('Related') def on_invalid_x (btn): invalid_image_phashes.persist_add(phash_x) with output: print('Invalid X') def on_invalid_y (btn): invalid_image_phashes.persist_add(phash_y) with output: print('Invalid Y') def on_reset (btn): with output: if hash_pair in 
duplicated_image_phash_pairs: duplicated_image_phash_pairs.persist_remove(hash_pair) print('-Duplicated') if hash_pair in not_duplicated_image_phash_pairs: not_duplicated_image_phash_pairs.persist_remove(hash_pair) print('-Not') if hash_pair in related_image_phash_pairs: related_image_phash_pairs.persist_remove(hash_pair) print('-Related') if phash_x in invalid_image_phashes: invalid_image_phashes.persist_remove(phash_x) print('-Invalid X') if phash_y in invalid_image_phashes: invalid_image_phashes.persist_remove(phash_y) print('-Invalid Y') print('Reset') yes_btn.on_click(on_yes) no_btn.on_click(on_no) related_btn.on_click(on_related) invalid_x_btn.on_click(on_invalid_x) invalid_y_btn.on_click(on_invalid_y) reset_btn.on_click(on_reset) if default == 'no': on_no(None) elif default == 'yes': on_yes(None) return HBox([VBox([yes_btn, no_btn, related_btn, invalid_x_btn, invalid_y_btn, reset_btn, output]), widgets.Image(value=open(image_x['file_path'], 'rb').read(), width=250, height=150), widgets.Image(value=open(image_y['file_path'], 'rb').read(), width=250, height=150)]) def potential_duplicates (threshold): for i in range(distance.shape[0]): for j in range(i): if distance[i, j] <= threshold: phash_pair = frozenset([imageDedup[i]['phash'], imageDedup[j]['phash']]) if (phash_pair not in auto_duplicated_image_phash_pairs and phash_pair not in duplicated_post_image_phash_pairs and phash_pair not in duplicated_image_phash_pairs and phash_pair not in not_duplicated_image_phash_pairs and phash_pair not in related_album_image_phash_pairs and phash_pair not in related_image_phash_pairs): yield (i, j) distance_threshold = 10 pdup = potential_duplicates(distance_threshold) for i in range(10): try: next_pdup = next(pdup) except StopIteration: print('StopIteration') break idx_x, idx_y = next_pdup image_x = imageDedup[idx_x] image_y = imageDedup[idx_y] print(f"{idx_x} {idx_y} {distance[idx_x, idx_y]} {image_x['phash']} {image_y['phash']} {image_x['width']} {image_y['width']} {image_x['image_id']} {image_y['image_id']}") display(make_dedup_box(idx_x, idx_y, default=None if distance[idx_x, idx_y] < 6 else 'no')) # display(make_dedup_box(idx_x, idx_y, default='yes' if distance[idx_x, idx_y] < 9 else 'no')) ``` # Visually check images ## Images with high variability ``` # interested_phashes = set() # def potential_duplicates_high (threshold): # for i in range(distance.shape[0]): # for j in range(i): # if distance[i, j] >= threshold: # phash_pair = frozenset([imageDedup[i]['phash'], imageDedup[j]['phash']]) # if (phash_pair in duplicated_image_phash_pairs): # interested_phashes.add(imageDedup[i]['phash']) # interested_phashes.add(imageDedup[j]['phash']) # yield (i, j) # pduph = potential_duplicates_high(13) # for i in range(100): # try: # next_pdup = next(pduph) # except StopIteration: # print('StopIteration') # break # idx_x, idx_y = next_pdup # image_x = imageDedup[idx_x] # image_y = imageDedup[idx_y] # print(f"{idx_x} {idx_y} {distance[idx_x, idx_y]} {image_x['phash']} {image_y['phash']} {image_x['width']} {image_y['width']} {image_x['image_id']} {image_y['image_id']}") # display(make_dedup_box(idx_x, idx_y)) # invalid_image_phashes = set(json.load(open('handmade/invalid_image_phashes.json'))) # examined_images = [ # 'reddit/dataisugly/2o08rl_0', # manually downloaded # 'reddit/dataisugly/2nwubr_0', # manually downloaded # 'reddit/dataisugly/beivt8_0', # manually downloaded # 'reddit/dataisugly/683b4i_0', # manually downloaded # 'reddit/dataisugly/3zcw30_0', # manually downloaded # 
'reddit/dataisugly/1oxrh5_0', # manually downloaded a higher resolution image # 'reddit/dataisugly/3or2g0_0', # manually downloaded # 'reddit/dataisugly/5iobqn_0', # manually downloaded # 'reddit/dataisugly/29fpuo_0', # manually downloaded # 'reddit/dataisugly/5xux1f_0', # manually downloaded # 'reddit/dataisugly/35lrw1_0', # manually downloaded # 'reddit/dataisugly/1bxhv2_0', # manually downloaded a higher resolution image # 'reddit/dataisugly/3peais_0', # manually downloaded # 'reddit/dataisugly/2vdk71_0', # manually downloaded # 'reddit/dataisugly/6b8w73_0', # manually downloaded # 'reddit/dataisugly/2w8pnr_0', # manually downloaded an image with more context # 'reddit/dataisugly/2dt19h_0', # manually downloaded # 'reddit/dataisugly/31tj8a_0', # manually downloaded # 'reddit/dataisugly/30smxr_0', # manually downloaded # 'reddit/dataisugly/30dbx6_0', # manually downloaded # 'reddit/dataisugly/561ytm_0', # manually downloaded # 'reddit/dataisugly/6q4tre_0', # manually downloaded # 'reddit/dataisugly/3icm4g_0', # manually downloaded # 'reddit/dataisugly/6z5v98_0', # manually downloaded # 'reddit/dataisugly/5fucjm_0', # manually downloaded # 'reddit/dataisugly/99bczz_0', # manually downloaded # 'reddit/dataisugly/2662wv_0', # manually downloaded # 'reddit/dataisugly/26otpi_0', # manually downloaded a higher resolution image # 'reddit/dataisugly/68scgb_0', # manually downloaded # 'reddit/dataisugly/et75qp_0', # manually downloaded # 'reddit/dataisugly/4c9zc1_0', # manually downloaded an image with more context # 'reddit/dataisugly/2525a5_0', # manually downloaded more images, but does not matched with the one with more context # 'reddit/dataisugly/2la7zt_0', # thumbnail alt # ] ``` ## Invalid images ``` # invalids = [] # for h in invalid_image_phashes: # invalid_images = [f for f in imagefiles.find({'phash': h})] # if len(invalid_images) > 0: # invalids.append(invalid_images[0]) # display(Box([widgets.Image(value=open(i['file_path'], 'rb').read(), width=100, height=100) for i in invalids], # layout=Layout(display='flex', flex_flow='row wrap'))) ``` # Consolidate ## Related images ``` related_images = [[imageDedup[idx]['image_id'] for idx in c] for c in nx.components.connected_components(nx.Graph(related_distance <= 1)) if len(c) > 1] len(related_images) for ids in related_images: for i in ids: imageMeta = imageDedup[image_id_to_idx(i)] ri = [r for r in set(imageMeta.get('related_images', []) + ids) if r != i] imagededup.update_one({'image_id': i}, {'$set': {'related_images': ri}}) ``` ## Duplicated images ``` excluding_image_phashes = PersistentSet.load_set(handmade_dir/'excluding_image_phashes.json') excluding_image_phashes.persist_add('c13e3ae10e70fd86') excluding_image_phashes.persist_add('fe81837a94e3807e') excluding_image_phashes.persist_add('af9da24292fae149') excluding_image_phashes.persist_add('ad87d2696738ca4c') excluding_image_phashes.persist_add('d25264dfa9659392') excluding_image_phashes.persist_add('964e3b3160e14f8f') class ImageDedup (): _attrs = [ 'id', 'post_id', 'datetime', 'url', 'title', 'content', 'author', 'removed', 'ups', 'num_comments', 'external_link', 'source', 'source_platform', 'source_url', 'tags', 'labels', 'media_type', 'thumbnail_url', 'preview_url', 'external_link_url', 'archive_url', 'thumbnail', 'preview', 'external_link', 'archive', 'manual', 'image_id', 'short_image_id', 'album', 'index_in_album', 'image_type', 'file_path', 'ext', 'animated', 'size', 'width', 'height', 'pixels', 'image_order', 'ahash', 'phash', 'pshash', 'dhash', 'whash', 
'duplicated_posts', 'related_images', 'duplicated_images', 'popularity_score' ] def __init__ (self, imageMetas=[]): # print(imageMetas) if len(imageMetas) == 0: raise Exception('Empty imageFiles array.') self._imageMetas = imageMetas self._image_ids = [i['image_id'] for i in imageMetas] self._image_order = sort_images(self._imageMetas) self._post_ids = {i['post_id'] for i in imageMetas} self._posts = [posts.find_one({'post_id': i}) for i in self._post_ids] dpost = [] for p in self._posts: if 'duplicated_posts' in p: for i in p['duplicated_posts']: if i not in self._post_ids: dpost.append(posts.find_one({'post_id': i})) self._posts += dpost if None in self._posts: print(self._post_ids) self._post_order = sort_posts(self._posts) for k, v in self.main_image.items(): if k in ['duplicated_posts', 'related_images']: continue setattr(self, k, v) for k, v in self.main_post.items(): if k in ['duplicated_posts', 'related_images']: continue if k in ['preview', 'thumbnail', 'external_link', 'archive', 'manual']: setattr(self, f"{k}_url", v) else: setattr(self, k, v) def digest (self): return {a:getattr(self, a) for a in ImageDedup._attrs if hasattr(self, a)} @property def duplicated_posts (self): post_ids = self._post_ids.union(*[set(p.get('duplicated_posts', [])) for p in self._posts]) return [i for i in post_ids if i != self.post_id] @property def duplicated_images (self): return [i for i in self._image_ids if i != self.image_id] @property def related_images (self): return [ri for i in self._imageMetas for ri in i.get('related_images', []) if ri != self.image_id] @property def main_post (self): # if len(self._post_order) > 1 and self._post_order[0]['source_platform'] != 'reddit': # print(f"main post warning: {[p['post_id'] for p in self._post_order]}") return self._post_order[0] @property def popularity_score (self): return sum([post_score(p) for p in self._posts if p['source'] == 'dataisugly']) @property def main_image (self): # if len(self._image_order) > 1 and self._image_order[0]['source_platform'] != 'reddit': # print(f"main image warning: {[i['image_id'] for i in self._image_order]}") mi = [i for i in self._image_order if i['phash'] not in excluding_image_phashes][0] return mi duplicated_images = [list(set([imageDedup[idx]['image_id'] for idx in c])) for c in nx.components.connected_components(nx.Graph(distance <= 1))] # imageDedup[image_id_to_idx('reddit/AusFinance/fman6b_0')] def dedup_image (ids): imagedd = ImageDedup([imageDedup[image_id_to_idx(i)] for i in set(ids)]) # if imagedd.main_post['source'] != 'dataisugly': # print(f"Image not from dataisugly: {imagedd.main_post['post_id']}") for i in imagedd.duplicated_images: imagededup.delete_one({'image_id': i}) imagededup.replace_one({'image_id': imagedd.image_id}, imagedd.digest(), upsert=True) return imagedd imagedds = parallel(dedup_image, duplicated_images, n_jobs=-1) # duplicated_image_ids = [c # for c in nx.components.connected_components(nx.Graph(distance <= 1)) # if len(c) > 1] # start = 0 # # len(duplicated_image_ids) # cnt = 0 # end = start + 50 # for idxs in duplicated_image_ids: # # print(f"{[imageDedup[i]['image_id'] for i in idxs]}") # # if len(idxs) == 2: # if len(idxs) >= 4: # if cnt >= start: # print(*[imageDedup[i]['image_id'] for i in idxs]) # print(*[imageDedup[i]['phash'] for i in idxs]) # display(HBox([ # widgets.Image(value=open(imageDedup[i]['file_path'], 'rb').read(), width=100, height=100) # for i in idxs])) # cnt += 1 # if cnt >= end: # print(end) # start = end # break ```
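A small sanity check can be appended here (not in the original notebook): after consolidation, the deduplicated collection should contain fewer documents than the raw image metadata collection.
```
# Added sanity check: compare collection sizes before and after deduplication.
n_meta = imagemeta.count_documents({})
n_dedup = imagededup.count_documents({})
print(f"imagemeta: {n_meta} images, imagededup: {n_dedup} images "
      f"({n_meta - n_dedup} merged as duplicates)")
```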
github_jupyter
<a href="https://colab.research.google.com/github/mees/calvin/blob/main/RL_with_CALVIN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> <h1>Reinforcement Learning with CALVIN</h1> The **CALVIN** simulated benchmark is perfectly suited for training agents with reinforcement learning, in this notebook we will demonstrate how to integrate your agents to these environments. ## Installation The first step is to install the CALVIN github repository such that we have access to the packages ``` # Download repo %mkdir /content/calvin %cd /content/calvin !git clone https://github.com/mees/calvin_env.git %cd /content/calvin/calvin_env !git clone https://github.com/lukashermann/tacto.git # Install packages %cd /content/calvin/calvin_env/tacto/ !pip3 install -e . %cd /content/calvin/calvin_env !pip3 install -e . !pip3 install -U numpy # Run this to check if the installation was succesful from calvin_env.envs.play_table_env import PlayTableSimEnv ``` ## Loading the environment After the installation has finished successfully, we can start using the environment for reinforcement Learning. To be able to use the environment we need to have the appropriate configuration that define the desired features, for this example, we will load the static and gripper camera. ``` %cd /content/calvin from hydra import initialize, compose with initialize(config_path="./calvin_env/conf/"): cfg = compose(config_name="config_data_collection.yaml", overrides=["cameras=static_and_gripper"]) cfg.env["use_egl"] = False cfg.env["show_gui"] = False cfg.env["use_vr"] = False cfg.env["use_scene_info"] = True print(cfg.env) ``` The environment has similar structure to traditional OpenAI Gym environments. * We can restart the simulation with the *reset* function. * We can perform an action in the environment with the *step* function. * We can visualize images taken from the cameras in the environment by using the *render* function. ``` import time import hydra import numpy as np from google.colab.patches import cv2_imshow env = hydra.utils.instantiate(cfg.env) observation = env.reset() #The observation is given as a dictionary with different values print(observation.keys()) for i in range(5): # The action consists in a pose displacement (position and orientation) action_displacement = np.random.uniform(low=-1, high=1, size=6) # And a binary gripper action, -1 for closing and 1 for oppening action_gripper = np.random.choice([-1, 1], size=1) action = np.concatenate((action_displacement, action_gripper), axis=-1) observation, reward, done, info = env.step(action) rgb = env.render(mode="rgb_array")[:,:,::-1] cv2_imshow(rgb) ``` ## Custom environment for Reinforcement Learning There are some aspects that needs to be defined to be able to use it for reinforcement learning, including: 1. Observation space 2. Action space 3. Reward function We are going to create a Custom environment that extends the **PlaytableSimEnv** to add these requirements. <br/> The specific task that will be solved is called "move_slider_left", here you can find a [list of possible tasks](https://github.com/mees/calvin_env/blob/main/conf/tasks/new_playtable_tasks.yaml) that can be evaluated using CALVIN. 
``` from gym import spaces from calvin_env.envs.play_table_env import PlayTableSimEnv class SlideEnv(PlayTableSimEnv): def __init__(self, tasks: dict = {}, **kwargs): super(SlideEnv, self).__init__(**kwargs) # For this example we will modify the observation to # only retrieve the end effector pose self.action_space = spaces.Box(low=-1, high=1, shape=(7,)) self.observation_space = spaces.Box(low=-1, high=1, shape=(7,)) # We can use the task utility to know if the task was executed correctly self.tasks = hydra.utils.instantiate(tasks) def reset(self): obs = super().reset() self.start_info = self.get_info() return obs def get_obs(self): """Overwrite robot obs to only retrieve end effector position""" robot_obs, robot_info = self.robot.get_observation() return robot_obs[:7] def _success(self): """ Returns a boolean indicating if the task was performed correctly """ current_info = self.get_info() task_filter = ["move_slider_left"] task_info = self.tasks.get_task_info_for_set(self.start_info, current_info, task_filter) return 'move_slider_left' in task_info def _reward(self): """ Returns the reward function that will be used for the RL algorithm """ reward = int(self._success()) * 10 r_info = {'reward': reward} return reward, r_info def _termination(self): """ Indicates if the robot has reached a terminal state """ success = self._success() done = success d_info = {'success': success} return done, d_info def step(self, action): """ Performing a relative action in the environment input: action: 7 tuple containing Position x, y, z. Angle in rad x, y, z. Gripper action each value in range (-1, 1) output: observation, reward, done info """ # Transform gripper action to discrete space env_action = action.copy() env_action[-1] = (int(action[-1] >= 0) * 2) - 1 self.robot.apply_action(env_action) for i in range(self.action_repeat): self.p.stepSimulation(physicsClientId=self.cid) obs = self.get_obs() info = self.get_info() reward, r_info = self._reward() done, d_info = self._termination() info.update(r_info) info.update(d_info) return obs, reward, done, info ``` # Training an RL agent After generating the wrapper training a reinforcement learning agent is straightforward, for this example we will use stable baselines 3 agents ``` !pip3 install stable_baselines3 ``` To train the agent we create an instance of our new environment and send it to the stable baselines agent to learn a policy. > Note: the example uses Soft Actor Critic (SAC) which is one of the state of the art algorithm for off-policy RL. ``` import gym import numpy as np from stable_baselines3 import SAC new_env_cfg = {**cfg.env} new_env_cfg["tasks"] = cfg.tasks new_env_cfg.pop('_target_', None) new_env_cfg.pop('_recursive_', None) env = SlideEnv(**new_env_cfg) model = SAC("MlpPolicy", env, verbose=1) model.learn(total_timesteps=10000, log_interval=4) ```
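To close the loop, a minimal evaluation rollout can be run with the trained policy. This cell is an addition to the tutorial, and the 200-step horizon is an arbitrary choice for illustration.
```
# Added sketch: evaluate the trained policy for one episode.
obs = env.reset()
episode_reward = 0.0
for step in range(200):  # arbitrary horizon
    action, _ = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
    episode_reward += reward
    if done:
        print("Episode finished, success:", info["success"])
        break
print("Episode return:", episode_reward)
```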
github_jupyter
# Polish phonetic comparison > "Transcript matching for E2E ASR with phonetic post-processing" - toc: false - branch: master - hidden: true - categories: [asr, polish, phonetic, todo] ``` from difflib import SequenceMatcher import icu plipa = icu.Transliterator.createInstance('pl-pl_FONIPA') ``` The errors in E2E models are quite often phonetic confusions, so we do the opposite of traditional ASR and generate the phonetic representation from the output as a basis for comparison. ``` def phonetic_check(word1, word2, ignore_spaces=False): """Uses ICU's IPA transliteration to check if words are the same""" tl1 = plipa.transliterate(word1) if not ignore_spaces else plipa.transliterate(word1.replace(' ', '')) tl2 = plipa.transliterate(word2) if not ignore_spaces else plipa.transliterate(word2.replace(' ', '')) return tl1 == tl2 phonetic_check("jórz", "jusz", False) ``` The Polish `y` is phonetically a raised schwa; like the schwa in English, it's often deleted in fast speech. The next function returns true if the only differences between the first word and the second are deletions of `y`, except at the end of the word (which is typically the plural ending). ``` def no_igrek(word1, word2): """Checks if a word-internal y has been deleted""" sm = SequenceMatcher(None, word1, word2) for oc in sm.get_opcodes(): if oc[0] == 'equal': continue elif oc[0] == 'delete' and word1[oc[1]:oc[2]] != 'y': return False elif oc[0] == 'delete' and word1[oc[1]:oc[2]] == 'y' and oc[2] == len(word1): return False elif oc[0] == 'insert' or oc[0] == 'replace': return False return True no_igrek('uniwersytet', 'uniwerstet') no_igrek('uniwerstety', 'uniwerstet') phonetic_alternatives = [ ['u', 'ó'], ['rz', 'ż'] ] def reverse_alts(phonlist): return [ [i[1], i[0]] for i in phonlist ] sm = SequenceMatcher(None, "już", "jurz") for oc in sm.get_opcodes(): print(oc) ``` Reads a `CTM`-like file, returning a list of lists containing the filename, start time, end time, and word. ``` def read_ctmish(filename): output = [] with open(filename, 'r') as f: for line in f.readlines(): pieces = line.strip().split(' ') if len(pieces) <= 4: continue for piece in pieces[4:]: output.append([pieces[0], pieces[2], pieces[3], piece]) return output ``` Returns the contents of a plain text file as a list of lists containing the line number and the word, for use in locating mismatches. ``` def read_text(filename): output = [] counter = 0 with open(filename, 'r') as f: for line in f.readlines(): counter += 1 for word in line.strip().split(' '): output.append([counter, word]) return output ctmish = read_ctmish("/mnt/c/Users/Jim O\'Regan/git/notes/PlgU9JyTLPE.ctm") rec_words = [i[3] for i in ctmish] ```
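Since the notebook is tagged `todo`, here is one way the pieces above might be combined (an added sketch, not the author's implementation): align the recognised words against a reference transcript with `SequenceMatcher` and count how many substitutions are merely phonetic spelling differences or word-internal `y`-deletions. The path `reference.txt` is a placeholder, not a file from the original notebook.
```
# Added sketch: phonetic post-processing of the ASR/reference alignment.
ref_words = [w for _, w in read_text("reference.txt")]  # placeholder reference file

sm = SequenceMatcher(None, ref_words, rec_words)
accepted, rejected = 0, 0
for tag, i1, i2, j1, j2 in sm.get_opcodes():
    if tag != 'replace' or (i2 - i1) != (j2 - j1):
        continue  # only score one-to-one substitutions
    for ref_w, rec_w in zip(ref_words[i1:i2], rec_words[j1:j2]):
        if phonetic_check(ref_w, rec_w) or no_igrek(ref_w, rec_w):
            accepted += 1  # same pronunciation or y-deletion: not a real error
        else:
            rejected += 1
print("phonetically acceptable substitutions:", accepted, "true mismatches:", rejected)
```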
github_jupyter
This notebook compares the email activities and draft activities of an IETF working group. Import the BigBang modules as needed. These should be in your Python environment if you've installed BigBang correctly. ``` import bigbang.mailman as mailman from bigbang.parse import get_date #from bigbang.functions import * from bigbang.archive import Archive from ietfdata.datatracker import * ``` Also, let's import a number of other dependencies we'll use later. ``` import pandas as pd import datetime import matplotlib.pyplot as plt import numpy as np import math import pytz import pickle import os ``` ## Load the httpbis Mailing List Now let's load the email data for analysis. ``` wg = "httpbisa" urls = [wg] archives = [Archive(url,mbox=True) for url in urls] activities = [arx.get_activity(resolved=False) for arx in archives] activity = activities[0] ``` ## Load IETF Draft Data Next, we will use the `ietfdata` tracker to look at the frequency of drafts for this working group. ``` import glob path = '../../archives/datatracker/httpbis/draft_metadata.csv' # use your path draft_df = pd.read_csv(path, index_col=None, header=0, parse_dates=['date']) ``` We will want to use the date of each draft; the full timestamp resolution is finer than we need. ``` draft_df['date'] = draft_df['date'].dt.date ``` ## Gender score and tendency measures This notebook uses the (notably imperfect) method of using first names to guess the gender of each draft author. ``` from gender_detector import gender_detector as gd detector = gd.GenderDetector('us') def gender_score(name): """ Takes a full name and returns a score for the guessed gender. 1 - male 0 - female .5 - unknown """ try: first_name = name.split(" ")[0] guess = detector.guess(first_name) score = 0 if guess == "male": return 1.0 elif guess == "female": return 0.0 else: # name does not have confidence to guess return 0.5 except: # Some error, "unknown" return .5 ``` ## Gender guesses on mailing list activity Now we use the gender guesser to track the contributions by differently gendered participants over time. ``` from bigbang.parse import clean_name gender_activity = activity.groupby( by=lambda x: gender_score(clean_name(x)), axis=1).sum().rename({0.0 : "women", 0.5 : "unknown", 1.0 : "men"}, axis="columns") ``` Note that our gender scoring method currently is unable to get a clear guess for a large percentage of the emails! ``` print("%.2f percent of emails are from an unknown gender." \ % (100 * gender_activity["unknown"].sum() / gender_activity.sum().sum())) plt.bar(["women","unknown","men"],gender_activity.sum()) plt.title("Total emails sent by guessed gender") ``` ## Plotting Some preprocessing is necessary to get the drafts data ready for plotting. ``` from matplotlib import cm viridis = cm.get_cmap('viridis') drafts_per_day = draft_df.groupby('date').count()['title'] dpd_log = drafts_per_day.apply(lambda x: np.log1p(x)) ``` For each of the mailing lists we are looking at, plot the rolling average (over `window`) of the number of emails sent per day. Then plot vertical lines with the height of the drafts count (in the code below the gender-tendency colouring is commented out, so the lines are simply drawn in red).
``` window = 100 plt.figure(figsize=(12, 6)) for i, gender in enumerate(gender_activity.columns): colors = [viridis(0), viridis(.5), viridis(.99)] ta = gender_activity[gender] rmta = ta.rolling(window).mean() rmtadna = rmta.dropna() plt.plot_date(np.array(rmtadna.index), np.array(rmtadna.values), color = colors[i], linestyle = '-', marker = None, label='%s email activity - %s' % (wg, gender), xdate=True) vax = plt.vlines(drafts_per_day.index, 0, drafts_per_day, colors = 'r', # draft_gt_per_day, cmap = 'viridis', label=f'{wg} drafts ({drafts_per_day.sum()} total)' ) plt.legend() plt.title("%s working group emails and drafts" % (wg)) #plt.colorbar(vax, label = "more womanly <-- Gender Tendency --> more manly") #plt.savefig("activites-marked.png") #plt.show() ``` ### Is gender diversity correlated with draft output? ``` from scipy.stats import pearsonr import pandas as pd def calculate_pvalues(df): df = df.dropna()._get_numeric_data() dfcols = pd.DataFrame(columns=df.columns) pvalues = dfcols.transpose().join(dfcols, how='outer') for r in df.columns: for c in df.columns: pvalues[r][c] = round(pearsonr(df[r], df[c])[1], 4) return pvalues drafts_per_ordinal_day = pd.Series({x[0].toordinal(): x[1] for x in drafts_per_day.items()}) drafts_per_ordinal_day ta.rolling(window).mean() garm = np.log1p(gender_activity.rolling(window).mean()) garm['diversity'] = (garm['unknown'] + garm['women']) / garm['men'] garm['drafts'] = drafts_per_ordinal_day garm['drafts'] = garm['drafts'].fillna(0) garm.corr(method='pearson') calculate_pvalues(garm) ``` Some variations... ``` garm_dna = garm.dropna(subset=['drafts']) ```
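One more optional view (not in the original notebook) is to plot the relationship that the correlation table summarises, using the `garm_dna` frame prepared above:
```
# Added sketch: scatter of rolling gender diversity against draft output.
plt.figure(figsize=(6, 4))
plt.scatter(garm_dna['diversity'], garm_dna['drafts'], alpha=0.3)
plt.xlabel('gender diversity (rolling, log-scaled)')
plt.ylabel('drafts per day')
plt.title('%s: diversity vs. draft output' % wg)
plt.show()
```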
github_jupyter
# Fastpages Notebook Blog Post > A tutorial of fastpages for Jupyter notebooks. - toc: true - badges: true - comments: true - categories: [jupyter] - image: images/chart-preview.png # About This notebook is a demonstration of some of capabilities of [fastpages](https://github.com/fastai/fastpages) with notebooks. With `fastpages` you can save your jupyter notebooks into the `_notebooks` folder at the root of your repository, and they will be automatically be converted to Jekyll compliant blog posts! ## Front Matter Front Matter is a markdown cell at the beginning of your notebook that allows you to inject metadata into your notebook. For example: - Setting `toc: true` will automatically generate a table of contents - Setting `badges: true` will automatically include GitHub and Google Colab links to your notebook. - Setting `comments: true` will enable commenting on your blog post, powered by [utterances](https://github.com/utterance/utterances). More details and options for front matter can be viewed on the [front matter section](https://github.com/fastai/fastpages#front-matter-related-options) of the README. ## Markdown Shortcuts A `#hide` comment at the top of any code cell will hide **both the input and output** of that cell in your blog post. A `#hide_input` comment at the top of any code cell will **only hide the input** of that cell. ``` #hide_input print('The comment #hide_input was used to hide the code that produced this.\n') ``` put a `#collapse-hide` flag at the top of any cell if you want to **hide** that cell by default, but give the reader the option to show it: ``` #collapse-hide import pandas as pd import altair as alt ``` put a `#collapse-show` flag at the top of any cell if you want to **show** that cell by default, but give the reader the option to hide it: ``` #collapse-show cars = 'https://vega.github.io/vega-datasets/data/cars.json' movies = 'https://vega.github.io/vega-datasets/data/movies.json' sp500 = 'https://vega.github.io/vega-datasets/data/sp500.csv' stocks = 'https://vega.github.io/vega-datasets/data/stocks.csv' flights = 'https://vega.github.io/vega-datasets/data/flights-5k.json' ``` ## Interactive Charts With Altair Charts made with Altair remain interactive. Example charts taken from [this repo](https://github.com/uwdata/visualization-curriculum), specifically [this notebook](https://github.com/uwdata/visualization-curriculum/blob/master/altair_interaction.ipynb). 
``` # hide df = pd.read_json(movies) # load movies data genres = df['Major_Genre'].unique() # get unique field values genres = list(filter(lambda d: d is not None, genres)) # filter out None values genres.sort() # sort alphabetically #hide mpaa = ['G', 'PG', 'PG-13', 'R', 'NC-17', 'Not Rated'] ``` ### Example 1: DropDown ``` # single-value selection over [Major_Genre, MPAA_Rating] pairs # use specific hard-wired values as the initial selected values selection = alt.selection_single( name='Select', fields=['Major_Genre', 'MPAA_Rating'], init={'Major_Genre': 'Drama', 'MPAA_Rating': 'R'}, bind={'Major_Genre': alt.binding_select(options=genres), 'MPAA_Rating': alt.binding_radio(options=mpaa)} ) # scatter plot, modify opacity based on selection alt.Chart(movies).mark_circle().add_selection( selection ).encode( x='Rotten_Tomatoes_Rating:Q', y='IMDB_Rating:Q', tooltip='Title:N', opacity=alt.condition(selection, alt.value(0.75), alt.value(0.05)) ) ``` ### Example 2: Tooltips ``` alt.Chart(movies).mark_circle().add_selection( alt.selection_interval(bind='scales', encodings=['x']) ).encode( x='Rotten_Tomatoes_Rating:Q', y=alt.Y('IMDB_Rating:Q', axis=alt.Axis(minExtent=30)), # use min extent to stabilize axis title placement tooltip=['Title:N', 'Release_Date:N', 'IMDB_Rating:Q', 'Rotten_Tomatoes_Rating:Q'] ).properties( width=600, height=400 ) ``` ### Example 3: More Tooltips ``` # select a point for which to provide details-on-demand label = alt.selection_single( encodings=['x'], # limit selection to x-axis value on='mouseover', # select on mouseover events nearest=True, # select data point nearest the cursor empty='none' # empty selection includes no data points ) # define our base line chart of stock prices base = alt.Chart().mark_line().encode( alt.X('date:T'), alt.Y('price:Q', scale=alt.Scale(type='log')), alt.Color('symbol:N') ) alt.layer( base, # base line chart # add a rule mark to serve as a guide line alt.Chart().mark_rule(color='#aaa').encode( x='date:T' ).transform_filter(label), # add circle marks for selected time points, hide unselected points base.mark_circle().encode( opacity=alt.condition(label, alt.value(1), alt.value(0)) ).add_selection(label), # add white stroked text to provide a legible background for labels base.mark_text(align='left', dx=5, dy=-5, stroke='white', strokeWidth=2).encode( text='price:Q' ).transform_filter(label), # add text labels for stock prices base.mark_text(align='left', dx=5, dy=-5).encode( text='price:Q' ).transform_filter(label), data=stocks ).properties( width=700, height=400 ) ``` ## Data Tables You can display tables per the usual way in your blog: ``` movies = 'https://vega.github.io/vega-datasets/data/movies.json' df = pd.read_json(movies) # display table with pandas df[['Title', 'Worldwide_Gross', 'Production_Budget', 'IMDB_Rating']].head() ``` ## Images ### Local Images You can reference local images and they will be copied and rendered on your blog automatically. You can include these with the following markdown syntax: `![](my_icons/fastai_logo.png)` ![](my_icons/fastai_logo.png) ### Remote Images Remote images can be included with the following markdown syntax: `![](https://image.flaticon.com/icons/svg/36/36686.svg)` ![](https://image.flaticon.com/icons/svg/36/36686.svg) ### Animated Gifs Animated Gifs work, too! 
`![](https://upload.wikimedia.org/wikipedia/commons/7/71/ChessPawnSpecialMoves.gif)` ![](https://upload.wikimedia.org/wikipedia/commons/7/71/ChessPawnSpecialMoves.gif) ### Captions You can include captions with markdown images like this: ``` ![](https://www.fast.ai/images/fastai_paper/show_batch.png "Credit: https://www.fast.ai/2020/02/13/fastai-A-Layered-API-for-Deep-Learning/") ``` ![](https://www.fast.ai/images/fastai_paper/show_batch.png "Credit: https://www.fast.ai/2020/02/13/fastai-A-Layered-API-for-Deep-Learning/") # Other Elements ## Tweetcards Typing `> twitter: https://twitter.com/jakevdp/status/1204765621767901185?s=20` will render this: > twitter: https://twitter.com/jakevdp/status/1204765621767901185?s=20 ## Youtube Videos Typing `> youtube: https://youtu.be/XfoYk_Z5AkI` will render this: > youtube: https://youtu.be/XfoYk_Z5AkI ## Boxes / Callouts Typing `> Warning: There will be no second warning!` will render this: > Warning: There will be no second warning! Typing `> Important: Pay attention! It's important.` will render this: > Important: Pay attention! It's important. Typing `> Tip: This is my tip.` will render this: > Tip: This is my tip. Typing `> Note: Take note of this.` will render this: > Note: Take note of this. Typing `> Note: A doc link to [an example website: fast.ai](https://www.fast.ai/) should also work fine.` will render in the docs: > Note: A doc link to [an example website: fast.ai](https://www.fast.ai/) should also work fine. ## Footnotes You can have footnotes in notebooks just like you can with markdown. For example, here is a footnote [^1]. [^1]: This is the footnote.
github_jupyter
# Coupling to Ideal Loads In this notebook, we investigate the WEST ICRH antenna behaviour when the front-face is considered as the combination of ideal (and independent) loads made of impedances all equal to $Z_s=R_c+j X_s$, where $R_c$ corresponds to the coupling resistance and $X_s$ is the strap reactance. <img src="West_front_face_ideal.png" width="300"/> In such a case, the power delivered to the plasma/front-face is: $$ P_t = \frac{1}{2} \sum_{i=1}^4 \Re[V_i I_i^* ] = \frac{1}{2} \sum_{i=1}^4 \Re[Z_i] |I_i|^2 = \frac{1}{2} R_c \sum_{i=1}^4 |I_i|^2 $$ Hence, we have defined the coupling resistance as: $$ R_c = \frac{\sum_{i=1}^4 \Re[Z_i] |I_i|^2}{\sum_{i=1}^4 |I_i|^2} $$ Inversely, the coupling resistance can be determined from: $$ R_c = \frac{2 P_t}{\sum_{i=1}^4 |I_i|^2} $$ In practice however, it is easier to measure RF voltages than currents: $$ I = \frac{V}{Z_s} = \frac{V}{R_c + j X_s} \rightarrow |I|^2 = \frac{|V|^2}{|R_c + j X_s|^2} \approx \frac{|V|^2}{|X_s|^2} $$ since $|X_s| \gg |R_c|$. The antenna model allows calculating the coupling resistance from currents (`.Rc()` method) or from the voltages (`.Rc_WEST()` method). The strap reactance $X_s$ depends on the strap geometry and varies with the frequency, so it has to be determined from the realistic CAD model. ``` %matplotlib widget import matplotlib.pyplot as plt import numpy as np import skrf as rf from tqdm.notebook import tqdm # WEST ICRH Antenna package import sys; sys.path.append('..') from west_ic_antenna import WestIcrhAntenna # styling the figures rf.stylely() ``` ## Coupling to an ideal front-face Coupling to an ideal front face of coupling resistance $R_c$ is easy using the `.load()` method of the `WestIcrhAntenna` class. This method takes into account the strap reactance frequency fit (derived in [Strap Reactance Frequency Fit](./strap_reactance.ipynb)). ``` freq = rf.Frequency(30, 70, npoints=1001, unit='MHz') ant_ideal = WestIcrhAntenna(frequency=freq) ant_ideal.load(Rc=1) # 1 Ohm coupling resistance front-face # matching left and right sides : note that the solutions are (almost) the same f_match = 55.5e6 C_left = ant_ideal.match_one_side(f_match=f_match, side='left') C_right = ant_ideal.match_one_side(f_match=f_match, side='right') ``` Unlike the "real" situation (see the [Matching](./matching.ipynb) or the [Coupling to a TOPICA plasma](./coupling_to_plasma_from_TOPICA.ipynb) notebooks), there is neither poloidal nor toroidal coupling of the straps in this front-face model. This leads to: * Match solutions are the same for both sides (within $10^{-3}$ pF).
* Using the match solutions for each sides does not require to shift the operating frequency: ``` # dipole excitation power = [1, 1] phase = [0, rf.pi] # active S-parameter for the match point: C_match = [C_left[0], C_left[1], C_right[2], C_right[3]] s_act = ant_ideal.s_act(power, phase, Cs=C_match) fig, ax = plt.subplots() ax.plot(ant_ideal.f_scaled, 20*np.log10(np.abs(s_act)), lw=2) ax.legend(('$S_{act,1}$', '$S_{act,2}$')) ax.grid(True) ``` ## Match Points vs Coupling Resistance Let's determine the match points for a range of coupling resistance at a given frequency ``` f_match = 55e6 Rcs = np.r_[0.01, 0.05, np.arange(0.1, 2.5, 0.2)] C_matchs = [] ant = WestIcrhAntenna() for Rc in tqdm(Rcs): ant.load(Rc) C_match = ant.match_one_side(f_match=f_match) C_matchs.append(C_match) ``` As the coupling resistance increases, the distance between capacitances (Top vs Bottom) increases: ``` fig, ax = plt.subplots() ax.plot(Rcs, np.array(C_matchs)[:,0:2], lw=2, marker='o') ax.axhline(C_matchs[0][0], ls='--', color='C0') ax.axhline(C_matchs[0][1], ls='--', color='C1') ax.set_xlabel('Rc [Ohm]') ax.set_ylabel('C [pF]') ax.legend(('Top', 'Bot')) ``` Displayed differently, the distance between capacitances (Top - Bottom) versus coupling resistance is: ``` delta_C_pos = np.array(C_matchs)[:,0] - C_matchs[0][0] delta_C_neg = C_matchs[0][1] - np.array(C_matchs)[:,1] fig, ax = plt.subplots() ax.plot(Rcs, delta_C_pos, label='Top: + $\Delta C$', lw=2) ax.plot(Rcs, delta_C_neg, label='Bot: - $\Delta C$', lw=2) ax.set_xlabel('Rc [Ohm]') ax.set_ylabel('$\Delta C$ [pF]') ax.set_ylim(bottom=0) ax.set_xlim(left=0) ax.legend() ``` ## Load Resilience Curves Ideal loads is usefull to study the behaviour of the load tolerance property of the antenna and the capacitance match points. It is only necessary to work on half-antenna here, because there is no coupling between toroidal elements. Now that we have figured out the match points, let's vary the coupling resistances for a fixed match point and look to the return power (or VSWR): this will highlight the load resilience property of the antenna. ``` # create a single frequency point antenna to speed-up calculations ant = WestIcrhAntenna(frequency=rf.Frequency.from_f(f_match, unit='Hz')) fig, ax = plt.subplots() power = [1, 1] phase = [0, np.pi] for C_match in tqdm(C_matchs[0:8]): SWRs = [] ant.Cs = [C_match[0], C_match[1], 150, 150] for Rc in Rcs: ant.load(Rc) SWR = ant.circuit().network.s_vswr.squeeze()[0,0] SWRs.append(SWR) ax.plot(Rcs, np.array(SWRs), lw=2) ax.set_xlabel('Rc [Ohm]') ax.set_ylabel('VSWR') ax.set_ylim(1, 8) ax.axhline(2, color='r') ax.legend(Rcs) from IPython.core.display import HTML def _set_css_style(css_file_path): """ Read the custom CSS file and load it into Jupyter Pass the file path to the CSS file """ styles = open(css_file_path, "r").read() s = '<style>%s</style>' % styles return HTML(s) _set_css_style('custom.css') ```
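As a purely numerical illustration of the voltage-based estimate discussed at the top of this notebook, the coupling resistance follows from $R_c \approx 2 P_t |X_s|^2 / \sum_i |V_i|^2$. This cell is an addition; all values below are made-up examples, not WEST measurements or outputs of the `WestIcrhAntenna` methods.
```
# Added illustration: estimate Rc from voltage amplitudes (all numbers are assumed).
import numpy as np

Pt = 1.0e6                                # coupled power [W] (assumed)
Xs = 20.0                                 # strap reactance magnitude [Ohm] (assumed)
V = np.array([27e3, 27e3, 25e3, 25e3])    # strap voltage amplitudes [V] (assumed)

I_squared = np.abs(V)**2 / Xs**2          # |I|^2 ~ |V|^2 / |Xs|^2 since |Xs| >> Rc
Rc_estimate = 2 * Pt / I_squared.sum()
print(f"Estimated coupling resistance: {Rc_estimate:.2f} Ohm")
```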
github_jupyter
# 数组基础 ## 创建一个数组 ``` import numpy as np import pdir pdir(np) import numpy as np a1 = np.array([0, 1, 2, 3, 4])#将列表转换为数组,可以传递任何序列(类数组),而不仅仅是常见的列表(list)数据类型。 a2 = np.array((0, 1, 2, 3, 4))#将元组转换为数组 print 'a1:',a1,type(a1) print 'a2:',a2,type(a2) b = np.arange(5) #python内置函数range()的数组版,返回的是numpy ndarrays数组对象,而不是列表 print 'b:',b,type(b) c1 = np.ones((3,4))#根据元组指定形状,返回全1数组 c2 = np.ones_like(a1)#以另一个数组为参数,以其形状和dtype创建全1数组 print 'c1',c1,type(c1) print 'c2',c2,type(c2) d1 = np.zeros((5,6))#根据元组指定形状,返回全0数组 d2 = np.zeros_like(c1)#以另一个数组为参数,以其形状和dtype创建全0数组 print 'd1',d1,type(d1) print 'd2',d2,type(d2) e1 = np.empty((2,3))#创建新数组,只分配内存空间但不填充任何值,不是返回0,而是未初始化的垃圾值 e2 = np.empty_like(d1)# print 'e1',e1,type(e1) print 'e2',e2,type(e2) f1 = np.eye(3)#创建一个正方的N*N单位矩阵对角线为1,其余为0() f2 = np.identity(4)#Return the identity array. print 'f1',f1,type(f1) print 'f2',f2,type(f2) g = np.linspace(0, 10, 5) #linspace: Return evenly spaced numbers over a specified interval. print 'g',g,type(g) ``` ## 数组属性 ``` a = np.array([[11, 12, 13, 14, 15], [16, 17, 18, 19, 20], [21, 22, 23, 24, 25], [26, 27, 28 ,29, 30], [31, 32, 33, 34, 35]]) print(type(a)) #<type 'numpy.ndarray'> print(a.dtype) #int32 print(a.size) #25 Return the number of elements along a given axis. print(a.shape) #(5L, 5L),Return the shape of an array print(a.itemsize) #4,itemsize输出array元素的字节数,本例32/8=4 print(a.ndim) #2,Return the number of dimensions of an array ``` # 使用数组 数组可不必编写循环即可实现循环-数组的矢量化 大小相等的数组之间的任何数学运算都会应用到元素级 大小不相等的数组之间的运算-叫做广播 ## 基本操作符-数组四则运算 +、- 、/ ``` a = np.arange(25) print 'a:',a,type(a) a = a.reshape((5, 5))#Gives a new shape to an array without changing its data print 'a:',a,type(a) b = np.array([10, 62, 1, 14, 2, 56, 79, 2, 1, 45, 4, 92, 5, 55, 63, 43, 35, 6, 53, 24, 56, 3, 56, 44, 78]) print b.shape b = b.reshape((5,5)) print 'b:',b,type(b) print(a + b)#逐元素运算,分别对每一个元素进行配对,然后对它们进行运算 print(a - b) print(a * b) print(a / b) print(a ** 2) print(a < b) #逻辑运算符比如 “<” 和 “>” 的时候,返回的将是一个布尔型数组 print(a > b) print(a.dot(b))#dot() 函数计算两个数组的点积。它返回的是一个标量(只有大小没有方向的一个值)而不是数组 ``` ## 数组特殊运算符 ``` # sum, min, max, cumsum a = np.arange(10) print 'a:',a print(a.sum()) # >>>45 print(a.min()) # >>>0 print(a.max()) # >>>9 print(a.cumsum()) # >>>[ 0 1 3 6 10 15 21 28 36 45] ``` # 索引 ## 基本索引-整数索引 ``` a = np.array([[11, 12, 13, 14, 15], [16, 17, 18, 19, 20], [21, 22, 23, 24, 25], [26, 27, 28 ,29, 30], [31, 32, 33, 34, 35]]) print 'a:',a,type(a) # 访问元素:二者功能相同 print a[1][3] print a[1,3]#逗号隔开的索引列表来选取单个元素 # 多维数组中,如果省略了后面的索引,则返回对象是一个维度低一点的数组。 print a[2] ``` ## 数组切片 数组切片与列表切片重要区别在于。数组切片是原数组的视图,这意味着数据不会被复制,视图上的任何修改都会直接反映到源数组上 ``` a = np.array([[11, 12, 13, 14, 15], [16, 17, 18, 19, 20], [21, 22, 23, 24, 25], [26, 27, 28 ,29, 30], [31, 32, 33, 34, 35]]) print 'a:',a # 单纯切片只能得到相同维度的数组视图, print(a[::2,::2]) # [[11 13 15] # [21 23 25] # [31 33 35]] print(a[:3,:2]) #切片和整数索引混合使用,可以得到低维度的切片 print(a[0, 1:4]) # >>>[12 13 14] print(a[1:4, 0]) # >>>[16 21 26] print(a[:, 1]) # >>>[12 17 22 27 32] a_slice = a[:,0]#数组切片是原数组的视图,这意味着数据不会被复制,视图上的任何修改都会直接反映到源数组上 print 'a_slice:',a_slice a_slice[:] = 66 print 'a:',a #若想要数组切片的一份副本而非视图,则需要显式的进行复制操作 a_slice_copy = a[:,0].copy() print "a_slice_copy:",a_slice_copy a_slice_copy[:] = 111 print "a_slice_copy:",a_slice_copy print 'a:',a ``` ## 布尔型索引 布尔型数组可用于数组索引,通过布尔型数组获取到数组中的数据,将总是创建副本 ==,!=,>,< ``` #数组的比较运算也是矢量化的,会产生一个布尔型数组 names = np.array(['bob','joe','will','bob','will','joe','joe']) names.shape names == 'bob' data = np.random.randn(7,4) data #布尔型数组可用于数组索引,布尔型数组的长度必须跟被索引的轴长度一致, #例如names == 'bob'长度为7;data按行索引 #names2 == 'bob'长度为4;data按列索引 print data[names 
print data[names == 'bob'] names2 = names[:4].copy() print(names2) print data[:,names2=='bob'] # boolean arrays can also be mixed with slices and integer indices print data[names == 'bob',1:3] print data[names == 'bob',1] # note: the python keywords and / or do not work with boolean arrays #build compound boolean conditions with the logical operators | (or), & (and) and ~ (not) mask = (names !='bob') print data[mask] mask = (names =='bob') | (names == 'will') print data[mask] # setting values through a boolean array data[data < 0] = 7 print data #setting whole rows/columns through a boolean array data[names !='bob'] = 100 print data data[:,names2 !='bob'] = 200 print data ``` ## Fancy indexing Fancy indexing means indexing with integer arrays, which lets you select rows in a specified order. Unlike slicing, fancy indexing always copies the data into a new array ``` arr = np.empty((8,4)) for i in range(8): arr[i] = i print arr #select a subset of rows in a given order by passing a list or array of integers specifying that order print arr[[4,3,0,6]] #negative indices select rows from the end print arr[[-1,-2]] #positive and negative indices can be mixed in the same fancy index print arr[[1,2,-1,-2]] #passing multiple index arrays returns a one-dimensional array arr = np.arange(32).reshape(8,4) print arr print arr[[4,3,0,6],[1,2,3,0]] #the selected elements are (4,1)(3,2)(0,3)(6,0) #to get a rectangular row/column subset of the matrix #method 1 print arr[[4,3,0,6]][:,[1,2,3,0]] #method 2: np.ix_() converts two 1-D integer arrays into an indexer that selects the rectangular region print arr[np.ix_([4,3,0,6],[1,2,3,0])] ``` ## Transposing arrays and swapping axes ``` # transpose returns a view of the source data (no copying is done), # two ways to transpose: # the arr.transpose() method # the arr.T attribute arr = np.arange(15).reshape(3,5) print arr print arr.T print arr.transpose() #for higher-dimensional arrays (>=3 dimensions), transpose needs a tuple of axis numbers to permute the axes arr = np.arange(16).reshape(2,2,4) print arr print arr.T print arr.transpose((1,0,2)) ``` ## Universal functions: fast element-wise array functions Functions that perform element-wise operations on the data in an array ``` #unary ufuncs take a single array arr = np.arange(10) print arr print np.sqrt(arr)#square root print np.exp(arr)#exponential print np.square(arr)#square #binary ufuncs take two arrays x = np.random.randn(5) print x y = np.random.randn(5) print y print np.add(x,y)#addition print np.subtract(x,y)#subtraction print np.greater(x,y)#element-wise comparison ``` ## Expressing conditional logic as array operations numpy.where() is the vectorized version of the ternary expression x if condition else y ``` xarr = np.array([1.1,1.2,1.3,1.4,1.5]) yarr = np.array([2.1,2.2,2.3,2.4,2.5]) condition = np.array([True,False,True,True,False]) # list comprehension print [(x if c else y)for x,y,c in zip(xarr,yarr,condition)] #problems with this approach #it is not very fast, because it is pure python #it cannot be used with multi-dimensional arrays #np.where() does the same thing much more concisely print np.where(condition,xarr,yarr) #the 2nd and 3rd arguments of np.where() need not be arrays; they can also be scalar values #where is typically used to produce a new array from another array arr = np.random.randn(4,4) print arr print np.where(arr > 0,1,-1)#set elements to 1 where arr > 0 and to -1 where arr <= 0 print np.where(arr > 0,1,arr)#set elements to 1 where arr > 0 and leave the rest unchanged ``` ## Mathematical and statistical methods Mathematical functions compute statistics over an entire array or along a given axis; they can be called either as array instance methods or as top-level numpy functions.
``` arr = np.arange(10).reshape(2,5) print arr print arr.mean()#mean of the array print arr.mean(axis = 0)#an axis can be given to compute the statistic along that axis print arr.mean(axis = 1) print np.mean(arr) print np.mean(arr,axis = 0) print np.mean(arr,axis = 1) print arr.sum()#sum of the array print arr.sum(axis = 0) print arr.sum(axis = 1) print np.sum(arr) print np.sum(arr,axis = 0) print np.sum(arr,axis = 1) print arr.var()#variance print arr.var(axis = 0)# print arr.var(axis = 1)# print np.var(arr)#variance print np.var(arr,axis = 0)# print np.var(arr,axis = 1)# print arr.std()#standard deviation print arr.std(axis = 0)# print arr.std(axis = 1)# print np.std(arr)#standard deviation print np.std(arr,axis = 0)# print np.std(arr,axis = 1)# print arr print arr.min()#minimum print arr.min(axis = 0)# print arr.min(axis = 1)# print np.min(arr)# print np.min(arr,axis = 0)# print np.min(arr,axis = 1)# print arr print arr.max()#maximum print arr.max(axis = 0)# print arr.max(axis = 1)# print np.max(arr) print np.max(arr,axis = 0)# print np.max(arr,axis = 1)# print arr print arr[0].argmin()#index of the minimum value print arr[1].argmin()#index of the minimum value print arr[0].argmax()#index of the maximum value print arr[1].argmax()#index of the maximum value print arr print arr.cumsum()#does not aggregate: returns the array of intermediate cumulative sums arr = arr + 1 print arr print arr.cumprod()#does not aggregate: returns the array of intermediate cumulative products ``` ## Methods for boolean arrays any() tests whether a boolean array contains one or more True values all() checks whether every value in the array is True ``` arr = np.random.randn(10) # print bools = arr > 0 print bools print bools.any() print np.any(bools) print bools.all() print np.all(bools) arr = np.array([0,1,2,3,4]) print arr.any()#for non-boolean arrays, all non-zero elements are treated as True ``` ## Sorting ``` arr = np.random.randn(10) print arr arr.sort()#like the sort of a built-in python list: sorts in place and modifies the array itself print arr arr = np.random.randn(10) print arr print np.sort(arr)#returns a sorted copy of the array ``` ## Unique values and other set logic Basic set operations that numpy provides for one-dimensional arrays: np.unique() finds the unique values in an array and returns them sorted ``` names = np.array(['bob', 'joe', 'will', 'bob', 'will', 'joe', 'joe']) print names print np.unique(names) ``` ## Linear algebra ``` #matrix multiplication x = np.array([[1,2,3],[4,5,6]]) y = np.array([[6,23],[-1,7],[8,9]]) print x print y print x.dot(y) print np.dot(x,y) ``` ## Random number generation The numpy.random module supplements the built-in python random module with functions for efficiently generating samples from many kinds of probability distributions ``` #draw a random integer below the given upper bound print np.random.randint(10) #draw samples from the standard normal distribution print np.random.randn(3,2) ``` ### Combining arrays ``` a = np.array([1,2,3]) b = np.array([4,5,6]) c = np.arange(6).reshape(2,3) d = np.arange(2,8).reshape(2,3) print(a) print(b) print(c) print(d) np.concatenate([c,d]) # In machine learning, useful to enrich or # add new/concatenate features with hstack np.hstack([c, d]) # Use broadcasting when needed to do this automatically np.vstack([a,b, d]) ```
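The sections above note that operations between arrays of unequal sizes are handled by broadcasting, and the last cell hints at it, but the rule is never shown on its own. Below is a minimal sketch of how broadcasting stretches size-1 axes; the array names are ours and are added purely for illustration.

```
import numpy as np

col = np.arange(3).reshape(3, 1)     # shape (3, 1)
row = np.array([10, 20, 30, 40])     # shape (4,)

# Shapes are compared from the trailing axis backwards; a size-1 axis is
# stretched to match, so (3, 1) + (4,) broadcasts to a (3, 4) result.
print(col + row)

# The same mechanism lets the (4,)-vector of column means be subtracted
# from every row of a (3, 4) matrix without an explicit loop.
m = np.arange(12).reshape(3, 4)
print(m - m.mean(axis=0))
```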
# A Scientific Deep Dive Into SageMaker LDA 1. [Introduction](#Introduction) 1. [Setup](#Setup) 1. [Data Exploration](#DataExploration) 1. [Training](#Training) 1. [Inference](#Inference) 1. [Epilogue](#Epilogue) # Introduction *** Amazon SageMaker LDA is an unsupervised learning algorithm that attempts to describe a set of observations as a mixture of distinct categories. Latent Dirichlet Allocation (LDA) is most commonly used to discover a user-specified number of topics shared by documents within a text corpus. Here each observation is a document, the features are the presence (or occurrence count) of each word, and the categories are the topics. Since the method is unsupervised, the topics are not specified up front, and are not guaranteed to align with how a human may naturally categorize documents. The topics are learned as a probability distribution over the words that occur in each document. Each document, in turn, is described as a mixture of topics. This notebook is similar to **LDA-Introduction.ipynb** but its objective and scope are different. We will be taking a deeper dive into the theory. The primary goals of this notebook are, * to understand the LDA model and the example dataset, * understand how the Amazon SageMaker LDA algorithm works, * interpret the meaning of the inference output. Prior knowledge of LDA is not required. However, we will run through concepts rather quickly and at least a foundational knowledge of mathematics or machine learning is recommended. Suggested references are provided, as appropriate. ``` !conda install -y scipy %matplotlib inline import os, re, tarfile import boto3 import matplotlib.pyplot as plt import mxnet as mx import numpy as np np.set_printoptions(precision=3, suppress=True) # some helpful utility functions are defined in the Python module # "generate_example_data" located in the same directory as this # notebook from generate_example_data import ( generate_griffiths_data, match_estimated_topics, plot_lda, plot_lda_topics) # accessing the SageMaker Python SDK import sagemaker from sagemaker.amazon.common import numpy_to_record_serializer from sagemaker.predictor import csv_serializer, json_deserializer ``` # Setup *** *This notebook was created and tested on an ml.m4.xlarge notebook instance.* We first need to specify some AWS credentials; specifically data locations and access roles. This is the only cell of this notebook that you will need to edit. In particular, we need the following data: * `bucket` - An S3 bucket accessible by this account. * Used to store input training data and model data output. * Should be within the same region as this notebook instance, training, and hosting. * `prefix` - The location in the bucket where this notebook's input and output data will be stored. (The default value is sufficient.) * `role` - The IAM Role ARN used to give training and hosting access to your data. * See documentation on how to create these. * The script below will try to determine an appropriate Role ARN. ``` from sagemaker import get_execution_role role = get_execution_role() bucket = '<your_s3_bucket_name_here>' prefix = 'sagemaker/lda_science' print('Training input/output will be stored in {}/{}'.format(bucket, prefix)) print('\nIAM Role: {}'.format(role)) ``` ## The LDA Model As mentioned above, LDA is a model for discovering latent topics describing a collection of documents. In this section we will give a brief introduction to the model.
Let, * $M$ = the number of *documents* in a corpus * $N$ = the average *length* of a document. * $V$ = the size of the *vocabulary* (the total number of unique words) We denote a *document* by a vector $w \in \mathbb{R}^V$ where $w_i$ equals the number of times the $i$th word in the vocabulary occurs within the document. This is called the "bag-of-words" format of representing a document. $$ \underbrace{w}_{\text{document}} = \overbrace{\big[ w_1, w_2, \ldots, w_V \big] }^{\text{word counts}}, \quad V = \text{vocabulary size} $$ The *length* of a document is equal to the total number of words in the document: $N_w = \sum_{i=1}^V w_i$. An LDA model is defined by two parameters: a topic-word distribution matrix $\beta \in \mathbb{R}^{K \times V}$ and a Dirichlet topic prior $\alpha \in \mathbb{R}^K$. In particular, let, $$\beta = \left[ \beta_1, \ldots, \beta_K \right]$$ be a collection of $K$ *topics* where each topic $\beta_k \in \mathbb{R}^V$ is represented as probability distribution over the vocabulary. One of the utilities of the LDA model is that a given word is allowed to appear in multiple topics with positive probability. The Dirichlet topic prior is a vector $\alpha \in \mathbb{R}^K$ such that $\alpha_k > 0$ for all $k$. # Data Exploration --- ## An Example Dataset Before explaining further let's get our hands dirty with an example dataset. The following synthetic data comes from [1] and comes with a very useful visual interpretation. > [1] Thomas Griffiths and Mark Steyvers. *Finding Scientific Topics.* Proceedings of the National Academy of Science, 101(suppl 1):5228-5235, 2004. ``` print('Generating example data...') num_documents = 6000 known_alpha, known_beta, documents, topic_mixtures = generate_griffiths_data( num_documents=num_documents, num_topics=10) num_topics, vocabulary_size = known_beta.shape # separate the generated data into training and tests subsets num_documents_training = int(0.9*num_documents) num_documents_test = num_documents - num_documents_training documents_training = documents[:num_documents_training] documents_test = documents[num_documents_training:] topic_mixtures_training = topic_mixtures[:num_documents_training] topic_mixtures_test = topic_mixtures[num_documents_training:] print('documents_training.shape = {}'.format(documents_training.shape)) print('documents_test.shape = {}'.format(documents_test.shape)) ``` Let's start by taking a closer look at the documents. Note that the vocabulary size of these data is $V = 25$. The average length of each document in this data set is 150. (See `generate_griffiths_data.py`.) ``` print('First training document =\n{}'.format(documents_training[0])) print('\nVocabulary size = {}'.format(vocabulary_size)) print('Length of first document = {}'.format(documents_training[0].sum())) average_document_length = documents.sum(axis=1).mean() print('Observed average document length = {}'.format(average_document_length)) ``` The example data set above also returns the LDA parameters, $$(\alpha, \beta)$$ used to generate the documents. Let's examine the first topic and verify that it is a probability distribution on the vocabulary. ``` print('First topic =\n{}'.format(known_beta[0])) print('\nTopic-word probability matrix (beta) shape: (num_topics, vocabulary_size) = {}'.format(known_beta.shape)) print('\nSum of elements of first topic = {}'.format(known_beta[0].sum())) ``` Unlike some clustering algorithms, one of the versatilities of the LDA model is that a given word can belong to multiple topics. 
The probability of that word occurring in each topic may differ, as well. This is reflective of real-world data where, for example, the word *"rover"* appears in a *"dogs"* topic as well as in a *"space exploration"* topic. In our synthetic example dataset, the first word in the vocabulary belongs to both Topic #1 and Topic #6 with non-zero probability. ``` print('Topic #1:\n{}'.format(known_beta[0])) print('Topic #6:\n{}'.format(known_beta[5])) ``` Human beings are visual creatures, so it might be helpful to come up with a visual representation of these documents. In the below plots, each pixel of a document represents a word. The greyscale intensity is a measure of how frequently that word occurs within the document. Below we plot the first few documents of the training set reshaped into 5x5 pixel grids. ``` %matplotlib inline fig = plot_lda(documents_training, nrows=3, ncols=4, cmap='gray_r', with_colorbar=True) fig.suptitle('$w$ - Document Word Counts') fig.set_dpi(160) ``` When taking a close look at these documents we can see some patterns in the word distributions suggesting that, perhaps, each topic represents a "column" or "row" of words with non-zero probability and that each document is composed primarily of a handful of topics. Below we plot the *known* topic-word probability distributions, $\beta$. Similar to the documents, we reshape each probability distribution to a $5 \times 5$ pixel image where the color represents the probability of each word occurring in the topic. ``` %matplotlib inline fig = plot_lda(known_beta, nrows=1, ncols=10) fig.suptitle(r'Known $\beta$ - Topic-Word Probability Distributions') fig.set_dpi(160) fig.set_figheight(2) ``` These 10 topics were used to generate the document corpus. Next, we will learn about how this is done. ## Generating Documents LDA is a generative model, meaning that the LDA parameters $(\alpha, \beta)$ are used to construct documents word-by-word by drawing from the topic-word distributions. In fact, looking closely at the example documents above you can see that some documents sample more words from some topics than from others. LDA works as follows: given * $M$ documents $w^{(1)}, w^{(2)}, \ldots, w^{(M)}$, * an average document length of $N$, * and an LDA model $(\alpha, \beta)$. **For** each document, $w^{(m)}$: * sample a topic mixture: $\theta^{(m)} \sim \text{Dirichlet}(\alpha)$ * **For** each word $n$ in the document: * Sample a topic $z_n^{(m)} \sim \text{Multinomial}\big( \theta^{(m)} \big)$ * Sample a word from this topic, $w_n^{(m)} \sim \text{Multinomial}\big( \beta_{z_n^{(m)}} \; \big)$ * Add to document The [plate notation](https://en.wikipedia.org/wiki/Plate_notation) for the LDA model, introduced in [2], encapsulates this process pictorially. ![](http://scikit-learn.org/stable/_images/lda_model_graph.png) > [2] David M Blei, Andrew Y Ng, and Michael I Jordan. Latent Dirichlet Allocation. Journal of Machine Learning Research, 3(Jan):993–1022, 2003. ## Topic Mixtures For the documents we generated above let's look at their corresponding topic mixtures, $\theta \in \mathbb{R}^K$. The topic mixtures represent the probability that a given word of the document is sampled from a particular topic. For example, if the topic mixture of an input document $w$ is, $$\theta = \left[ 0.3, 0.2, 0, 0.5, 0, \ldots, 0 \right]$$ then $w$ is 30% generated from the first topic, 20% from the second topic, and 50% from the fourth topic.
In particular, the words contained in the document are sampled from the first topic-word probability distribution 30% of the time, from the second distribution 20% of the time, and from the fourth distribution 50% of the time. The objective of inference, also known as scoring, is to determine the most likely topic mixture of a given input document. Colloquially, this means figuring out which topics appear within a given document and at what ratios. We will perform inference later in the [Inference](#Inference) section. Since we generated these example documents using the LDA model we know the topic mixture generating them. Let's examine these topic mixtures. ``` print('First training document =\n{}'.format(documents_training[0])) print('\nVocabulary size = {}'.format(vocabulary_size)) print('Length of first document = {}'.format(documents_training[0].sum())) print('First training document topic mixture =\n{}'.format(topic_mixtures_training[0])) print('\nNumber of topics = {}'.format(num_topics)) print('sum(theta) = {}'.format(topic_mixtures_training[0].sum())) ``` We plot the first document along with its topic mixture. We also plot the topic-word probability distributions again for reference. ``` %matplotlib inline fig, (ax1,ax2) = plt.subplots(2, 1) ax1.matshow(documents[0].reshape(5,5), cmap='gray_r') ax1.set_title(r'$w$ - Document', fontsize=20) ax1.set_xticks([]) ax1.set_yticks([]) cax2 = ax2.matshow(topic_mixtures[0].reshape(1,-1), cmap='Reds', vmin=0, vmax=1) cbar = fig.colorbar(cax2, orientation='horizontal') ax2.set_title(r'$\theta$ - Topic Mixture', fontsize=20) ax2.set_xticks([]) ax2.set_yticks([]) fig.set_dpi(100) %matplotlib inline # plot the known topic-word probability distributions again for reference fig = plot_lda(known_beta, nrows=1, ncols=10) fig.suptitle(r'Known $\beta$ - Topic-Word Probability Distributions') fig.set_dpi(160) fig.set_figheight(1.5) ``` Finally, let's plot several documents with their corresponding topic mixtures. We can see how topics with large weight in the document lead to more words in the document within the corresponding "row" or "column". ``` %matplotlib inline fig = plot_lda_topics(documents_training, 3, 4, topic_mixtures=topic_mixtures) fig.suptitle(r'$(w,\theta)$ - Documents with Known Topic Mixtures') fig.set_dpi(160) ``` # Training *** In this section we will give some insight into how AWS SageMaker LDA fits an LDA model to a corpus, create and run a SageMaker LDA training job, and examine the output trained model. ## Topic Estimation using Tensor Decompositions Given a document corpus, Amazon SageMaker LDA uses a spectral tensor decomposition technique to determine the LDA model $(\alpha, \beta)$ which most likely describes the corpus. See [1] for a primary reference of the theory behind the algorithm. The spectral decomposition, itself, is computed using the CPDecomp algorithm described in [2]. The overall idea is the following: given a corpus of documents $\mathcal{W} = \{w^{(1)}, \ldots, w^{(M)}\}, \; w^{(m)} \in \mathbb{R}^V,$ we construct a statistic tensor, $$T \in \bigotimes^3 \mathbb{R}^V$$ such that the spectral decomposition of the tensor is approximately the LDA parameters $\alpha \in \mathbb{R}^K$ and $\beta \in \mathbb{R}^{K \times V}$ which maximize the likelihood of observing the corpus for a given number of topics, $K$, $$T \approx \sum_{k=1}^K \alpha_k \; (\beta_k \otimes \beta_k \otimes \beta_k)$$ This statistic tensor encapsulates information from the corpus such as the document mean, cross correlation, and higher order statistics. For details, see [1].
> [1] Animashree Anandkumar, Rong Ge, Daniel Hsu, Sham Kakade, and Matus Telgarsky. *"Tensor Decompositions for Learning Latent Variable Models"*, Journal of Machine Learning Research, 15:2773–2832, 2014. > > [2] Tamara Kolda and Brett Bader. *"Tensor Decompositions and Applications"*. SIAM Review, 51(3):455–500, 2009. ## Store Data on S3 Before we run training we need to prepare the data. A SageMaker training job needs access to training data stored in an S3 bucket. Although training can accept data of various formats, we convert the documents to MXNet RecordIO Protobuf format before uploading to the S3 bucket defined at the beginning of this notebook. ``` # convert documents_training to Protobuf RecordIO format recordio_protobuf_serializer = numpy_to_record_serializer() fbuffer = recordio_protobuf_serializer(documents_training) # upload to S3 in bucket/prefix/train fname = 'lda.data' s3_object = os.path.join(prefix, 'train', fname) boto3.Session().resource('s3').Bucket(bucket).Object(s3_object).upload_fileobj(fbuffer) s3_train_data = 's3://{}/{}'.format(bucket, s3_object) print('Uploaded data to S3: {}'.format(s3_train_data)) ``` Next, we specify a Docker container containing the SageMaker LDA algorithm. For your convenience, a region-specific container is automatically chosen for you to minimize cross-region data communication. ``` containers = { 'us-west-2': '266724342769.dkr.ecr.us-west-2.amazonaws.com/lda:latest', 'us-east-1': '766337827248.dkr.ecr.us-east-1.amazonaws.com/lda:latest', 'us-east-2': '999911452149.dkr.ecr.us-east-2.amazonaws.com/lda:latest', 'eu-west-1': '999678624901.dkr.ecr.eu-west-1.amazonaws.com/lda:latest' } region_name = boto3.Session().region_name container = containers[region_name] print('Using SageMaker LDA container: {} ({})'.format(container, region_name)) ``` ## Training Parameters Particular to a SageMaker LDA training job are the following hyperparameters: * **`num_topics`** - The number of topics or categories in the LDA model. * Usually, this is not known a priori. * In this example, however, we know that the data is generated by ten topics. * **`feature_dim`** - The size of the *"vocabulary"*, in LDA parlance. * In this example, this is equal to 25. * **`mini_batch_size`** - The number of input training documents. * **`alpha0`** - *(optional)* a measurement of how "mixed" the topic-mixtures are. * When `alpha0` is small the data tends to be represented by one or few topics. * When `alpha0` is large the data tends to be an even combination of several or many topics. * The default value is `alpha0 = 1.0`. In addition to these LDA model hyperparameters, we provide additional parameters defining things like the EC2 instance type on which training will run, the S3 bucket containing the data, and the AWS access role. Note that, * Recommended instance type: `ml.c4` * Current limitations: * SageMaker LDA *training* can only run on a single instance. * SageMaker LDA does not take advantage of GPU hardware. * (The Amazon AI Algorithms team is working hard to provide these capabilities in a future release!) Using the above configuration, create a SageMaker client and use the client to create a training job.
``` session = sagemaker.Session() # specify general training job information lda = sagemaker.estimator.Estimator( container, role, output_path='s3://{}/{}/output'.format(bucket, prefix), train_instance_count=1, train_instance_type='ml.c4.2xlarge', sagemaker_session=session, ) # set algorithm-specific hyperparameters lda.set_hyperparameters( num_topics=num_topics, feature_dim=vocabulary_size, mini_batch_size=num_documents_training, alpha0=1.0, ) # run the training job on input data stored in S3 lda.fit({'train': s3_train_data}) ``` If you see the message > `===== Job Complete =====` at the bottom of the output logs then that means training successfully completed and the output LDA model was stored in the specified output path. You can also view information about and the status of a training job using the AWS SageMaker console. Just click on the "Jobs" tab and select the training job matching the training job name, below: ``` print('Training job name: {}'.format(lda.latest_training_job.job_name)) ``` ## Inspecting the Trained Model We know the LDA parameters $(\alpha, \beta)$ used to generate the example data. How does the learned model compare to the known one? In this section we will download the model data and measure how well SageMaker LDA did in learning the model. First, we download the model data. SageMaker will output the model in > `s3://<bucket>/<prefix>/output/<training job name>/output/model.tar.gz`. SageMaker LDA stores the model as a two-tuple $(\alpha, \beta)$ where each LDA parameter is an MXNet NDArray. ``` # download and extract the model file from S3 job_name = lda.latest_training_job.job_name model_fname = 'model.tar.gz' model_object = os.path.join(prefix, 'output', job_name, 'output', model_fname) boto3.Session().resource('s3').Bucket(bucket).Object(model_object).download_file(fname) with tarfile.open(fname) as tar: tar.extractall() print('Downloaded and extracted model tarball: {}'.format(model_object)) # obtain the model file model_list = [fname for fname in os.listdir('.') if fname.startswith('model_')] model_fname = model_list[0] print('Found model file: {}'.format(model_fname)) # get the model from the model file and store in Numpy arrays alpha, beta = mx.ndarray.load(model_fname) learned_alpha_permuted = alpha.asnumpy() learned_beta_permuted = beta.asnumpy() print('\nLearned alpha.shape = {}'.format(learned_alpha_permuted.shape)) print('Learned beta.shape = {}'.format(learned_beta_permuted.shape)) ``` Presumably, SageMaker LDA has found the topics most likely used to generate the training corpus. However, even if this is the case the topics would not be returned in any particular order. Therefore, we match the found topics to the known topics closest in L1-norm in order to find the topic permutation. Note that we will use the `permutation` later during inference to match known topic mixtures to found topic mixtures. Below we plot the known topic-word probability distribution, $\beta \in \mathbb{R}^{K \times V}$ next to the distributions found by SageMaker LDA as well as the L1-norm errors between the two. ``` permutation, learned_beta = match_estimated_topics(known_beta, learned_beta_permuted) learned_alpha = learned_alpha_permuted[permutation] fig = plot_lda(np.vstack([known_beta, learned_beta]), 2, 10) fig.set_dpi(160) fig.suptitle('Known vs. Found Topic-Word Probability Distributions')
fig.set_figheight(3) beta_error = np.linalg.norm(known_beta - learned_beta, 1) alpha_error = np.linalg.norm(known_alpha - learned_alpha, 1) print('L1-error (beta) = {}'.format(beta_error)) print('L1-error (alpha) = {}'.format(alpha_error)) ``` Not bad! In the eyeball-norm the topics match quite well. In fact, the topic-word distribution error is approximately 2%. # Inference *** A trained model does nothing on its own. We now want to use the model we computed to perform inference on data. For this example, that means predicting the topic mixture representing a given document. We create an inference endpoint using the SageMaker Python SDK `deploy()` function from the job we defined above. We specify the instance type where inference is computed as well as an initial number of instances to spin up. ``` lda_inference = lda.deploy( initial_instance_count=1, instance_type='ml.m4.xlarge', # LDA inference may work better at scale on ml.c4 instances ) ``` Congratulations! You now have a functioning SageMaker LDA inference endpoint. You can confirm the endpoint configuration and status by navigating to the "Endpoints" tab in the AWS SageMaker console and selecting the endpoint matching the endpoint name, below: ``` print('Endpoint name: {}'.format(lda_inference.endpoint)) ``` With this realtime endpoint at our fingertips we can finally perform inference on our training and test data. We can pass a variety of data formats to our inference endpoint. In this example we will demonstrate passing CSV-formatted data. Other available formats are JSON-formatted, JSON-sparse-formatted, and RecordIO Protobuf. We make use of the SageMaker Python SDK utilities `csv_serializer` and `json_deserializer` when configuring the inference endpoint. ``` lda_inference.content_type = 'text/csv' lda_inference.serializer = csv_serializer lda_inference.deserializer = json_deserializer ``` We pass some test documents to the inference endpoint. Note that the serializer and deserializer will automatically take care of the datatype conversion. ``` results = lda_inference.predict(documents_test[:12]) print(results) ``` It may be hard to see but the output format of the SageMaker LDA inference endpoint is a Python dictionary with the following format. ``` { 'predictions': [ {'topic_mixture': [ ... ] }, {'topic_mixture': [ ... ] }, {'topic_mixture': [ ... ] }, ... ] } ``` We extract the topic mixtures, themselves, corresponding to each of the input documents. ``` inferred_topic_mixtures_permuted = np.array([prediction['topic_mixture'] for prediction in results['predictions']]) print('Inferred topic mixtures (permuted):\n\n{}'.format(inferred_topic_mixtures_permuted)) ``` ## Inference Analysis Recall that although SageMaker LDA successfully learned the underlying topics which generated the sample data, the topics were in a different order. Before we compare to the known topic mixtures $\theta \in \mathbb{R}^K$, we should also permute the inferred topic mixtures. ``` inferred_topic_mixtures = inferred_topic_mixtures_permuted[:,permutation] print('Inferred topic mixtures:\n\n{}'.format(inferred_topic_mixtures)) ``` Let's plot these topic mixture probability distributions alongside the known ones.
``` %matplotlib inline # create array of bar plots width = 0.4 x = np.arange(10) nrows, ncols = 3, 4 fig, ax = plt.subplots(nrows, ncols, sharey=True) for i in range(nrows): for j in range(ncols): index = i*ncols + j ax[i,j].bar(x, topic_mixtures_test[index], width, color='C0') ax[i,j].bar(x+width, inferred_topic_mixtures[index], width, color='C1') ax[i,j].set_xticks(range(num_topics)) ax[i,j].set_yticks(np.linspace(0,1,5)) ax[i,j].grid(which='major', axis='y') ax[i,j].set_ylim([0,1]) ax[i,j].set_xticklabels([]) if (i==(nrows-1)): ax[i,j].set_xticklabels(range(num_topics), fontsize=7) if (j==0): ax[i,j].set_yticklabels([0,'',0.5,'',1.0], fontsize=7) fig.suptitle('Known vs. Inferred Topic Mixtures') ax_super = fig.add_subplot(111, frameon=False) ax_super.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off') ax_super.grid(False) ax_super.set_xlabel('Topic Index') ax_super.set_ylabel('Topic Probability') fig.set_dpi(160) ``` In the eyeball-norm these look quite comparable. Let's be more scientific about this. Below we compute and plot the distribution of L1-errors from **all** of the test documents. Note that we send a new payload of test documents to the inference endpoint and apply the appropriate permutation to the output. ``` %%time # create a payload containing all of the test documents and run inference again # # TRY THIS: # try switching between the test data set and a subset of the training # data set. It is likely that LDA inference will perform better against # the training set than the holdout test set. # payload_documents = documents_test # Example 1 known_topic_mixtures = topic_mixtures_test # Example 1 #payload_documents = documents_training[:600]; # Example 2 #known_topic_mixtures = topic_mixtures_training[:600] # Example 2 print('Invoking endpoint...\n') results = lda_inference.predict(payload_documents) inferred_topic_mixtures_permuted = np.array([prediction['topic_mixture'] for prediction in results['predictions']]) inferred_topic_mixtures = inferred_topic_mixtures_permuted[:,permutation] print('known_topics_mixtures.shape = {}'.format(known_topic_mixtures.shape)) print('inferred_topics_mixtures_test.shape = {}\n'.format(inferred_topic_mixtures.shape)) %matplotlib inline l1_errors = np.linalg.norm((inferred_topic_mixtures - known_topic_mixtures), 1, axis=1) # plot the error freqency fig, ax_frequency = plt.subplots() bins = np.linspace(0,1,40) weights = np.ones_like(l1_errors)/len(l1_errors) freq, bins, _ = ax_frequency.hist(l1_errors, bins=50, weights=weights, color='C0') ax_frequency.set_xlabel('L1-Error') ax_frequency.set_ylabel('Frequency', color='C0') # plot the cumulative error shift = (bins[1]-bins[0])/2 x = bins[1:] - shift ax_cumulative = ax_frequency.twinx() cumulative = np.cumsum(freq)/sum(freq) ax_cumulative.plot(x, cumulative, marker='o', color='C1') ax_cumulative.set_ylabel('Cumulative Frequency', color='C1') # align grids and show freq_ticks = np.linspace(0, 1.5*freq.max(), 5) freq_ticklabels = np.round(100*freq_ticks)/100 ax_frequency.set_yticks(freq_ticks) ax_frequency.set_yticklabels(freq_ticklabels) ax_cumulative.set_yticks(np.linspace(0, 1, 5)) ax_cumulative.grid(which='major', axis='y') ax_cumulative.set_ylim((0,1)) fig.suptitle('Topic Mixutre L1-Errors') fig.set_dpi(110) ``` Machine learning algorithms are not perfect and the data above suggests this is true of SageMaker LDA. With more documents and some hyperparameter tuning we can obtain more accurate results against the known topic-mixtures. 
For now, let's just investigate the document-topic mixture pairs that seem to do well as well as those that do not. Below we retrieve a document and topic mixture corresponding to a small L1-error as well as one with a large L1-error. ``` N = 6 good_idx = (l1_errors < 0.05) good_documents = payload_documents[good_idx][:N] good_topic_mixtures = inferred_topic_mixtures[good_idx][:N] poor_idx = (l1_errors > 0.3) poor_documents = payload_documents[poor_idx][:N] poor_topic_mixtures = inferred_topic_mixtures[poor_idx][:N] %matplotlib inline fig = plot_lda_topics(good_documents, 2, 3, topic_mixtures=good_topic_mixtures) fig.suptitle('Documents With Accurate Inferred Topic-Mixtures') fig.set_dpi(120) %matplotlib inline fig = plot_lda_topics(poor_documents, 2, 3, topic_mixtures=poor_topic_mixtures) fig.suptitle('Documents With Inaccurate Inferred Topic-Mixtures') fig.set_dpi(120) ``` In this example set, the documents on which inference was not as accurate tend to have a denser topic-mixture. This makes sense when extrapolated to real-world datasets: it can be difficult to nail down which topics are represented in a document when the document uses words from a large subset of the vocabulary. ## Stop / Close the Endpoint Finally, we should delete the endpoint before we close the notebook. To do so execute the cell below. Alternately, you can navigate to the "Endpoints" tab in the SageMaker console, select the endpoint with the name stored in the variable `endpoint_name`, and select "Delete" from the "Actions" dropdown menu. ``` sagemaker.Session().delete_endpoint(lda_inference.endpoint) ``` # Epilogue --- In this notebook we, * learned about the LDA model, * generated some example LDA documents and their corresponding topic-mixtures, * trained a SageMaker LDA model on a training set of documents and compared the learned model to the known model, * created an inference endpoint, * used the endpoint to infer the topic mixtures of a test input and analyzed the inference error. There are several things to keep in mind when applying SageMaker LDA to real-world data such as a corpus of text documents. Note that input documents to the algorithm, both in training and inference, need to be vectors of integers representing word counts. Each index corresponds to a word in the corpus vocabulary. Therefore, one will need to "tokenize" their corpus vocabulary. $$ \text{"cat"} \mapsto 0, \; \text{"dog"} \mapsto 1, \; \text{"bird"} \mapsto 2, \ldots $$ Each text document then needs to be converted to a "bag-of-words" format document. $$ w = \text{"cat bird bird bird cat"} \quad \longmapsto \quad w = [2, 0, 3, 0, \ldots, 0] $$ Also note that many real-world applications have large vocabulary sizes. It may be necessary to represent the input documents in sparse format. Finally, the use of stemming and lemmatization in data preprocessing provides several benefits. Doing so can improve training and inference compute time since it reduces the effective vocabulary size. More importantly, though, it can improve the quality of learned topic-word probability matrices and inferred topic mixtures. For example, the words *"parliament"*, *"parliaments"*, *"parliamentary"*, *"parliament's"*, and *"parliamentarians"* are all essentially the same word, *"parliament"*, but with different conjugations. For the purposes of detecting topics, such as a *"politics"* or *"governments"* topic, the inclusion of all five does not add much additional value as they all essentially describe the same feature.
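As a concrete illustration of the tokenization and bag-of-words conversion described above, here is a minimal sketch using scikit-learn's `CountVectorizer`. The toy corpus and variable names are ours, not part of the original notebook, and in practice the resulting count matrix would still need to be cast to the dtype and (possibly sparse) format expected by SageMaker LDA.

```
from sklearn.feature_extraction.text import CountVectorizer

# A toy corpus; in practice these would be the raw text documents.
corpus = [
    "cat bird bird bird cat",
    "dog cat dog",
]

# Build the vocabulary ("tokenize") and count word occurrences per document.
vectorizer = CountVectorizer()
word_counts = vectorizer.fit_transform(corpus)   # sparse matrix, shape (n_documents, vocabulary_size)

print(vectorizer.vocabulary_)    # mapping word -> column index, e.g. {'bird': 0, 'cat': 1, 'dog': 2}
print(word_counts.toarray())     # dense bag-of-words vectors, one row per document
```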
<a href="https://colab.research.google.com/github/totti0223/deep_learning_for_biologists_with_keras/blob/master/notebooks/PlantDisease_tutorial.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Training a Plant Disease Diagnosis Model with PlantVillage Dataset ``` import numpy as np import os import matplotlib.pyplot as plt from skimage.io import imread from sklearn.metrics import classification_report, confusion_matrix from sklearn .model_selection import train_test_split import keras import keras.backend as K from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator from keras.utils.np_utils import to_categorical from keras import layers from keras.models import Sequential, Model from keras.callbacks import EarlyStopping, ModelCheckpoint ``` # Preparation ## Data Preparation ``` !apt-get install subversion > /dev/null #Retreive specifc diseases of tomato for training !svn export https://github.com/spMohanty/PlantVillage-Dataset/trunk/raw/color/Tomato___Bacterial_spot image/Tomato___Bacterial_spot > /dev/null !svn export https://github.com/spMohanty/PlantVillage-Dataset/trunk/raw/color/Tomato___Early_blight image/Tomato___Early_blight > /dev/null !svn export https://github.com/spMohanty/PlantVillage-Dataset/trunk/raw/color/Tomato___Late_blight image/Tomato___Late_blight > /dev/null !svn export https://github.com/spMohanty/PlantVillage-Dataset/trunk/raw/color/Tomato___Septoria_leaf_spot image/Tomato___Septoria_leaf_spot > /dev/null !svn export https://github.com/spMohanty/PlantVillage-Dataset/trunk/raw/color/Tomato___Target_Spot image/Tomato___Target_Spot > /dev/null !svn export https://github.com/spMohanty/PlantVillage-Dataset/trunk/raw/color/Tomato___healthy image/Tomato___healthy > /dev/null #folder structure !ls image plt.figure(figsize=(15,10)) #visualize several images parent_directory = "image" for i, folder in enumerate(os.listdir(parent_directory)): print(folder) folder_directory = os.path.join(parent_directory,folder) files = os.listdir(folder_directory) #will inspect only 1 image per folder file = files[0] file_path = os.path.join(folder_directory,file) image = imread(file_path) plt.subplot(1,6,i+1) plt.imshow(image) plt.axis("off") name = folder.split("___")[1][:-1] plt.title(name) #plt.show() #load everything into memory x = [] y = [] class_names = [] parent_directory = "image" for i,folder in enumerate(os.listdir(parent_directory)): print(i,folder) class_names.append(folder) folder_directory = os.path.join(parent_directory,folder) files = os.listdir(folder_directory) #will inspect only 1 image per folder for file in files: file_path = os.path.join(folder_directory,file) image = load_img(file_path,target_size=(64,64)) image = img_to_array(image)/255. 
x.append(image) y.append(i) x = np.array(x) y = to_categorical(y) #check the data shape print(x.shape) print(y.shape) print(y[0]) x_train, _x, y_train, _y = train_test_split(x,y,test_size=0.2, stratify = y, random_state = 1) x_valid,x_test, y_valid, y_test = train_test_split(_x,_y,test_size=0.4, stratify = _y, random_state = 1) print("train data:",x_train.shape,y_train.shape) print("validation data:",x_valid.shape,y_valid.shape) print("test data:",x_test.shape,y_test.shape) ``` ## Model Preparation ``` K.clear_session() nfilter = 32 #VGG16 like model model = Sequential([ #block1 layers.Conv2D(nfilter,(3,3),padding="same",name="block1_conv1",input_shape=(64,64,3)), layers.Activation("relu"), layers.BatchNormalization(), #layers.Dropout(rate=0.2), layers.Conv2D(nfilter,(3,3),padding="same",name="block1_conv2"), layers.BatchNormalization(), layers.Activation("relu"), #layers.Dropout(rate=0.2), layers.MaxPooling2D((2,2),strides=(2,2),name="block1_pool"), #block2 layers.Conv2D(nfilter*2,(3,3),padding="same",name="block2_conv1"), layers.BatchNormalization(), layers.Activation("relu"), #layers.Dropout(rate=0.2), layers.Conv2D(nfilter*2,(3,3),padding="same",name="block2_conv2"), layers.BatchNormalization(), layers.Activation("relu"), #layers.Dropout(rate=0.2), layers.MaxPooling2D((2,2),strides=(2,2),name="block2_pool"), #block3 layers.Conv2D(nfilter*2,(3,3),padding="same",name="block3_conv1"), layers.BatchNormalization(), layers.Activation("relu"), #layers.Dropout(rate=0.2), layers.Conv2D(nfilter*4,(3,3),padding="same",name="block3_conv2"), layers.BatchNormalization(), layers.Activation("relu"), #layers.Dropout(rate=0.2), layers.Conv2D(nfilter*4,(3,3),padding="same",name="block3_conv3"), layers.BatchNormalization(), layers.Activation("relu"), #layers.Dropout(rate=0.2), layers.MaxPooling2D((2,2),strides=(2,2),name="block3_pool"), #layers.Flatten(), layers.GlobalAveragePooling2D(), #inference layer layers.Dense(128,name="fc1"), layers.BatchNormalization(), layers.Activation("relu"), #layers.Dropout(rate=0.2), layers.Dense(128,name="fc2"), layers.BatchNormalization(), layers.Activation("relu"), #layers.Dropout(rate=0.2), layers.Dense(6,name="prepredictions"), layers.Activation("softmax",name="predictions") ]) model.compile(optimizer = "adam", loss="categorical_crossentropy", metrics=["accuracy"]) model.summary() ``` ## Training ``` #utilize early stopping to stop at the lowest validation loss es = EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='auto') #save the model with the best (lowest) validation loss during training ckpt = ModelCheckpoint("PlantDiseaseCNNmodel.hdf5", monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1) #define separate generators for training and validation data, since augmentation is not applied to the validation data t_gen = ImageDataGenerator(rotation_range=90,horizontal_flip=True) v_gen = ImageDataGenerator() train_gen = t_gen.flow(x_train,y_train,batch_size=98) valid_gen = v_gen.flow(x_valid,y_valid,batch_size=98) history = model.fit_generator( train_gen, steps_per_epoch = train_gen.n // 98, callbacks = [es,ckpt], validation_data = valid_gen, validation_steps = valid_gen.n // 98, epochs=50) ``` ## Evaluation ``` #load the model weight file with the lowest validation loss model.load_weights("PlantDiseaseCNNmodel.hdf5") #or obtain the pretrained model from the github repo.
#check the model metrics print(model.metrics_names) #evaluate training data print(model.evaluate(x= x_train, y = y_train)) #evaluate validation data print(model.evaluate(x= x_valid, y = y_valid)) #evaluate test data print(model.evaluate(x= x_test, y = y_test)) #draw a confusion matrix #true label y_true = np.argmax(y_test,axis=1) #prediction label Y_pred = model.predict(x_test) y_pred = np.argmax(Y_pred, axis=1) print(y_true) print(y_pred) #https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py from sklearn.metrics import confusion_matrix from sklearn.utils.multiclass import unique_labels def plot_confusion_matrix(y_true, y_pred, classes, normalize=False, title=None, cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if not title: if normalize: title = 'Normalized confusion matrix' else: title = 'Confusion matrix, without normalization' # Compute confusion matrix cm = confusion_matrix(y_true, y_pred) # Only use the labels that appear in the data #classes = classes[unique_labels(y_true, y_pred)] if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) fig, ax = plt.subplots(figsize=(5,5)) im = ax.imshow(cm, interpolation='nearest', cmap=cmap) #ax.figure.colorbar(im, ax=ax) # We want to show all ticks... ax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]), # ... and label them with the respective list entries xticklabels=classes, yticklabels=classes, title=title, ylabel='True label', xlabel='Predicted label') # Rotate the tick labels and set their alignment. plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") # Loop over data dimensions and create text annotations. fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i in range(cm.shape[0]): for j in range(cm.shape[1]): ax.text(j, i, format(cm[i, j], fmt), ha="center", va="center", color="white" if cm[i, j] > thresh else "black") fig.tight_layout() return ax np.set_printoptions(precision=2) plot_confusion_matrix(y_true, y_pred, classes=class_names, normalize=True, title='Normalized confusion matrix') ``` ## Predicting Individual Images ``` n = 15 #do not exceed (number of test images - 1) plt.imshow(x_test[n]) plt.show() true_label = np.argmax(y_test,axis=1)[n] print("true_label is:",true_label,":",class_names[true_label]) prediction = model.predict(x_test[n][np.newaxis,...])[0] print("predicted_value is:",prediction) predicted_label = np.argmax(prediction) print("predicted_label is:",predicted_label,":",class_names[predicted_label]) if true_label == predicted_label: print("correct prediction") else: print("wrong prediction") ```
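The cell above predicts only on images that are already in the preprocessed test array. As a sketch of how one could diagnose a brand-new image file with the same trained model, the helper below reuses the notebook's own preprocessing (64x64 resize, scaling by 255); the function name and the example file path are hypothetical and are not part of the original tutorial.

```
from keras.preprocessing.image import load_img, img_to_array
import numpy as np

def diagnose(image_path, model, class_names, target_size=(64, 64)):
    # Load one image file, apply the same preprocessing used for training,
    # and return the predicted class name with its softmax probability.
    image = img_to_array(load_img(image_path, target_size=target_size)) / 255.
    probabilities = model.predict(image[np.newaxis, ...])[0]
    label = int(np.argmax(probabilities))
    return class_names[label], float(probabilities[label])

# Hypothetical usage with a leaf photo saved next to the notebook:
# name, confidence = diagnose("my_tomato_leaf.jpg", model, class_names)
# print(name, confidence)
```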
# Single-stepping the `logictools` Pattern Generator * This notebook will show how to use single-stepping mode with the pattern generator * Note that all generators in the _logictools_ library may be **single-stepped** ### Visually ... #### The _logictools_ library on the Zynq device on the PYNQ board ![](./images/single-stepping_pattern_generator.png) ### Demonstrator notes * For this demo, the pattern generator implements a simple, 4-bit binary, up-counter * We will single-step the clock and verify the counter operation * The output is verified using the waveforms captured by the trace analyzer ### Points to note * __Everything__ runs on the Zynq chip on the PYNQ board, even this slide show! * We will specify and implement circuits __using only Python code__ * __No__ Xilinx CAD tools are used * We can create live, real-time circuits __instantaneously__ ``` # Specify a stimulus waveform and display it from pynq.overlays.logictools import LogicToolsOverlay from pynq.lib.logictools import Waveform logictools_olay = LogicToolsOverlay('logictools.bit') up_counter_stimulus = {'signal': [ {'name': 'bit0', 'pin': 'D0', 'wave': 'lh' * 8}, {'name': 'bit1', 'pin': 'D1', 'wave': 'l.h.' * 4}, {'name': 'bit2', 'pin': 'D2', 'wave': 'l...h...' * 2}, {'name': 'bit3', 'pin': 'D3', 'wave': 'l.......h.......'}]} # Check visually that the stimulus pattern is correct waveform = Waveform(up_counter_stimulus) waveform.display() # Add the signals we want to analyze up_counter = {'signal': [ ['stimulus', {'name': 'bit0', 'pin': 'D0', 'wave': 'lh' * 8}, {'name': 'bit1', 'pin': 'D1', 'wave': 'l.h.' * 4}, {'name': 'bit2', 'pin': 'D2', 'wave': 'l...h...' * 2}, {'name': 'bit3', 'pin': 'D3', 'wave': 'l.......h.......'}], {}, ['analysis', {'name': 'bit0_output', 'pin': 'D0'}, {'name': 'bit1_output', 'pin': 'D1'}, {'name': 'bit2_output', 'pin': 'D2'}, {'name': 'bit3_output', 'pin': 'D3'}]]} # Display the stimulus and analysis signal groups waveform = Waveform(up_counter) waveform.display() # Configure the pattern generator and analyzer pattern_generator = logictools_olay.pattern_generator pattern_generator.trace(num_analyzer_samples=16) pattern_generator.setup(up_counter, stimulus_group_name='stimulus', analysis_group_name='analysis') # Press `cntrl-enter` to advance the pattern generator by one clock cycle pattern_generator.step() pattern_generator.show_waveform() # Advance an arbitrary number of cycles no_of_cycles = 7 for _ in range(no_of_cycles): pattern_generator.step() pattern_generator.show_waveform() # Finally, reset the pattern generator after use pattern_generator.reset() ```
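Since the stimulus pattern above is 16 samples long, the per-cycle stepping can also be wrapped in a small helper that advances a logictools generator a fixed number of cycles before showing the trace. This is only a convenience sketch built from the `step()` and `show_waveform()` calls already used above, and it assumes the generator has been set up and not yet reset.

```
def step_cycles(generator, n_cycles=16):
    # Advance the generator one clock cycle at a time, then display the captured waveform.
    for _ in range(n_cycles):
        generator.step()
    generator.show_waveform()

# Example: walk a freshly configured pattern generator through one full
# 4-bit count (16 cycles) and inspect the up-counter outputs.
# step_cycles(pattern_generator, n_cycles=16)
```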
``` import os, os.path import pickle import time import numpy from scipy import interpolate from galpy.util import bovy_conversion, bovy_plot, save_pickles import gd1_util from gd1_util import R0, V0 import seaborn as sns from matplotlib import cm, pyplot import simulate_streampepper import statsmodels.api as sm lowess = sm.nonparametric.lowess %pylab inline from matplotlib.ticker import NullFormatter, FuncFormatter save_figures= False ``` # Figures for the section on approximately computing the stream structure ``` # Load the smooth and peppered stream sdf_smooth= gd1_util.setup_gd1model() pepperfilename= 'gd1pepper.pkl' if os.path.exists(pepperfilename): with open(pepperfilename,'rb') as savefile: sdf_pepper= pickle.load(savefile) else: timpacts= simulate_streampepper.parse_times('256sampling',9.) sdf_pepper= gd1_util.setup_gd1model(timpact=timpacts, hernquist=True) save_pickles(pepperfilename,sdf_pepper) ``` ## Is the mean perpendicular frequency close to zero? ``` # Sampling functions massrange=[5.,9.] plummer= False Xrs= 5. nsubhalo= simulate_streampepper.nsubhalo rs= simulate_streampepper.rs dNencdm= simulate_streampepper.dNencdm sample_GM= lambda: (10.**((-0.5)*massrange[0])\ +(10.**((-0.5)*massrange[1])\ -10.**((-0.5)*massrange[0]))\ *numpy.random.uniform())**(1./(-0.5))\ /bovy_conversion.mass_in_msol(V0,R0) rate_range= numpy.arange(massrange[0]+0.5,massrange[1]+0.5,1) rate= numpy.sum([dNencdm(sdf_pepper,10.**r,Xrs=Xrs, plummer=plummer) for r in rate_range]) sample_rs= lambda x: rs(x*bovy_conversion.mass_in_1010msol(V0,R0)*10.**10., plummer=plummer) numpy.random.seed(2) sdf_pepper.simulate(rate=rate,sample_GM=sample_GM,sample_rs=sample_rs,Xrs=Xrs) n= 100000 aa_mock_per= sdf_pepper.sample(n=n,returnaAdt=True) dO= numpy.dot(aa_mock_per[0].T-sdf_pepper._progenitor_Omega, sdf_pepper._sigomatrixEig[1][:,sdf_pepper._sigomatrixEigsortIndx]) dO[:,2]*= sdf_pepper._sigMeanSign da= numpy.dot(aa_mock_per[1].T-sdf_pepper._progenitor_angle, sdf_pepper._sigomatrixEig[1][:,sdf_pepper._sigomatrixEigsortIndx]) da[:,2]*= sdf_pepper._sigMeanSign apar= da[:,2] xs= numpy.linspace(0.,1.5,1001) mO_unp= numpy.array([sdf_smooth.meanOmega(x,oned=True,use_physical=False) for x in xs]) mOint= interpolate.InterpolatedUnivariateSpline(xs,mO_unp,k=3) mOs= mOint(apar) frac= 0.02 alpha=0.01 linecolor='0.65' bovy_plot.bovy_print(axes_labelsize=18.,xtick_labelsize=14.,ytick_labelsize=14.) figsize(12,4) subplot(1,3,1) bovy_plot.bovy_plot(apar[::3],dO[::3,2]/mOs[::3]-1,'k.',alpha=alpha*2,gcf=True, rasterized=True,xrange=[0.,1.5],yrange=[-1.2,1.2]) z= lowess(dO[:,2]/mOs-1,apar,frac=frac) plot(z[::100,0],z[::100,1],color=linecolor,lw=2.5) #xlim(0.,1.5) #ylim(-1.2,1.2) xlabel(r'$\Delta\theta_\parallel$') bovy_plot.bovy_text(r'$\Delta\Omega_\parallel/\langle\Delta\Omega^0_\parallel\rangle-1$',top_left=True, size=18.) subplot(1,3,2) bovy_plot.bovy_plot(apar[::3],dO[::3,1]/mOs[::3],'k.',alpha=alpha*2,gcf=True, rasterized=True,xrange=[0.,1.5],yrange=[-0.05,0.05]) z= lowess(dO[:,1]/mOs,apar,frac=frac) plot(z[::100,0],z[::100,1],color=linecolor,lw=2.5) #xlim(0.,1.5) #ylim(-0.05,0.05) xlabel(r'$\Delta\theta_\parallel$') bovy_plot.bovy_text(r'$\Delta\Omega_{\perp,1}/\langle\Delta\Omega^0_\parallel\rangle$',top_left=True, size=18.) 
subplot(1,3,3) bovy_plot.bovy_plot(apar[::3],dO[::3,0]/mOs[::3],'k.',alpha=alpha,gcf=True, rasterized=True,xrange=[0.,1.5],yrange=[-0.05,0.05]) z= lowess(dO[:,0]/mOs,apar,frac=frac) plot(z[::100,0],z[::100,1],color=linecolor,lw=2.5) #xlim(0.,1.5) #ylim(-0.05,0.05) xlabel(r'$\Delta\theta_\parallel$') bovy_plot.bovy_text(r'$\Delta\Omega_{\perp,2}/\langle\Delta\Omega^0_\parallel\rangle$',top_left=True, size=18.) if save_figures: tight_layout() bovy_plot.bovy_end_print(os.path.join(os.getenv('PAPERSDIR'),'2016-stream-stats','gd1like_meanOparOperp.pdf')) print "This stream had %i impacts" % len(sdf_pepper._GM) ``` ## Test the single-impact approximations ``` # Setup a single, large impact m= 10.**8. GM= 10**8./bovy_conversion.mass_in_msol(V0,R0) timpactIndx= numpy.argmin(numpy.fabs(numpy.array(sdf_pepper._uniq_timpact)-1.3/bovy_conversion.time_in_Gyr(V0,R0))) # Load the single-impact stream gapfilename= 'gd1single.pkl' if os.path.exists(gapfilename): with open(gapfilename,'rb') as savefile: sdf_gap= pickle.load(savefile) else: sdf_gap= gd1_util.setup_gd1model(hernquist=True, singleImpact=True, impactb=0.5*rs(m), subhalovel=numpy.array([-25.,155.,30.])/V0, impact_angle=0.6, timpact=sdf_pepper._uniq_timpact[timpactIndx], GM=GM,rs=rs(m)) save_pickles(gapfilename,sdf_gap) n= 100000 aa_mock_per= sdf_gap.sample(n=n,returnaAdt=True) dO= numpy.dot(aa_mock_per[0].T-sdf_gap._progenitor_Omega, sdf_gap._sigomatrixEig[1][:,sdf_gap._sigomatrixEigsortIndx]) dO[:,2]*= sdf_gap._sigMeanSign da= numpy.dot(aa_mock_per[1].T-sdf_gap._progenitor_angle, sdf_gap._sigomatrixEig[1][:,sdf_gap._sigomatrixEigsortIndx]) da[:,2]*= sdf_gap._sigMeanSign num= True apar= numpy.arange(0.,sdf_smooth.length()+0.003,0.003) dens_unp= numpy.array([sdf_smooth._density_par(x) for x in apar]) dens_approx= numpy.array([sdf_gap.density_par(x,approx=True) for x in apar]) dens_approx_higherorder= numpy.array([sdf_gap._density_par(x,approx=True,higherorder=True) for x in apar]) # normalize dens_unp= dens_unp/numpy.sum(dens_unp)/(apar[1]-apar[0]) dens_approx= dens_approx/numpy.sum(dens_approx)/(apar[1]-apar[0]) dens_approx_higherorder= dens_approx_higherorder/numpy.sum(dens_approx_higherorder)/(apar[1]-apar[0]) if num: dens_num= numpy.array([sdf_gap.density_par(x,approx=False) for x in apar]) dens_num= dens_num/numpy.sum(dens_num)/(apar[1]-apar[0]) bovy_plot.bovy_print(axes_labelsize=18.,xtick_labelsize=14.,ytick_labelsize=14.) figsize(6,7) axTop= pyplot.axes([0.15,0.3,0.825,0.65]) fig= pyplot.gcf() fig.sca(axTop) bovy_plot.bovy_plot(apar,dens_approx,lw=2.5,gcf=True, color='k', xrange=[0.,1.], yrange=[0.,2.24], ylabel=r'$\mathrm{density}$') plot(apar,dens_unp,lw=3.5,color='k',ls='--',zorder=0) nullfmt = NullFormatter() # no labels axTop.xaxis.set_major_formatter(nullfmt) dum= hist(da[:,2],bins=101,normed=True,range=[apar[0],apar[-1]], histtype='step',color='0.55',zorder=0,lw=3.) axBottom= pyplot.axes([0.15,0.1,0.825,0.2]) fig= pyplot.gcf() fig.sca(axBottom) bovy_plot.bovy_plot(apar,100.*(dens_approx_higherorder-dens_approx)/dens_approx_higherorder, lw=2.5,gcf=True,color='k', xrange=[0.,1.], yrange=[-0.145,0.145], zorder=2, xlabel=r'$\Delta \theta_\parallel$', ylabel=r'$\mathrm{relative\ difference\ in}\ \%$') if num: plot(apar,100.*(dens_num-dens_approx_higherorder)/dens_approx_higherorder, lw=2.5,zorder=1,color='0.55') # label aparIndx= numpy.argmin(numpy.fabs(apar-0.64)) plot([0.45,apar[aparIndx]],[0.06,(100.*(dens_approx_higherorder-dens_approx)/dens_approx_higherorder)[aparIndx]], 'k',lw=1.) 
bovy_plot.bovy_text(0.1,0.07,r'$\mathrm{higher\!\!-\!\!order\ minus\ linear}$',size=17.) if num: aparIndx= numpy.argmin(numpy.fabs(apar-0.62)) plot([0.45,apar[aparIndx]],[-0.07,(100.*(dens_num-dens_approx_higherorder)/dens_approx_higherorder)[aparIndx]], 'k',lw=1.) bovy_plot.bovy_text(0.05,-0.12,r'$\mathrm{numerical\ minus\ higher\!\!-\!\!order}$',size=17.) if save_figures: bovy_plot.bovy_end_print(os.path.join(os.getenv('PAPERSDIR'),'2016-stream-stats','gd1likeSingle_densapprox.pdf')) mO_unp= numpy.array([sdf_smooth.meanOmega(x,oned=True) for x in apar])\ *bovy_conversion.freq_in_Gyr(V0,R0) mO_approx= numpy.array([sdf_gap.meanOmega(x,approx=True,oned=True) for x in apar])\ *bovy_conversion.freq_in_Gyr(V0,R0) mO_approx_higherorder= numpy.array([sdf_gap.meanOmega(x,oned=True,approx=True,higherorder=True) for x in apar])\ *bovy_conversion.freq_in_Gyr(V0,R0) if num: mO_num= numpy.array([sdf_gap.meanOmega(x,approx=False,oned=True) for x in apar])\ *bovy_conversion.freq_in_Gyr(V0,R0) frac= 0.005 alpha=0.01 linecolor='0.65' bovy_plot.bovy_print(axes_labelsize=18.,xtick_labelsize=14.,ytick_labelsize=14.) figsize(6,7) axTop= pyplot.axes([0.15,0.3,0.825,0.65]) fig= pyplot.gcf() fig.sca(axTop) bovy_plot.bovy_plot(apar,mO_approx,lw=2.5,gcf=True, color='k', xrange=[0.,1.], yrange=[0.,0.2], ylabel=r'$\Delta \Omega_\parallel\,(\mathrm{Gyr}^{-1})$') plot(apar,mO_unp,lw=2.5,color='k',ls='--') plot(da[::3,2],dO[::3,2]*bovy_conversion.freq_in_Gyr(V0,R0), 'k.',alpha=alpha*2,rasterized=True) nullfmt = NullFormatter() # no labels axTop.xaxis.set_major_formatter(nullfmt) axBottom= pyplot.axes([0.15,0.1,0.825,0.2]) fig= pyplot.gcf() fig.sca(axBottom) bovy_plot.bovy_plot(apar,100.*(mO_approx_higherorder-mO_approx)/mO_approx_higherorder, lw=2.5,gcf=True,color='k', xrange=[0.,1.],zorder=1, yrange=[-0.039,0.039], xlabel=r'$\Delta \theta_\parallel$', ylabel=r'$\mathrm{relative\ difference\ in\ \%}$') if num: plot(apar,100.*(mO_num-mO_approx_higherorder)/mO_approx_higherorder, lw=2.5,color='0.55',zorder=0) # label aparIndx= numpy.argmin(numpy.fabs(apar-0.64)) plot([0.45,apar[aparIndx]],[0.024,(100.*(mO_approx_higherorder-mO_approx)/mO_approx_higherorder)[aparIndx]], 'k',lw=1.) bovy_plot.bovy_text(0.1,0.026,r'$\mathrm{higher\!\!-\!\!order\ minus\ linear}$',size=17.) aparIndx= numpy.argmin(numpy.fabs(apar-0.6)) if num: plot([0.45,apar[aparIndx]],[-0.02,(100.*(mO_num-mO_approx_higherorder)/mO_approx_higherorder)[aparIndx]], 'k',lw=1.) bovy_plot.bovy_text(0.05,-0.03,r'$\mathrm{numerical\ minus\ higher\!\!-\!\!order}$',size=17.) 
if save_figures: bovy_plot.bovy_end_print(os.path.join(os.getenv('PAPERSDIR'),'2016-stream-stats','gd1likeSingle_mOparapprox.pdf')) start= time.time() numpy.array([sdf_gap.density_par(x,approx=False) for x in apar[::10]]) end= time.time() print (end-start)*1000.*10./len(apar) start= time.time() numpy.array([sdf_gap.density_par(x,approx=True) for x in apar[::10]]) end= time.time() print (end-start)*1000.*10./len(apar) start= time.time() numpy.array([sdf_gap.density_par(x,approx=True,higherorder=True) for x in apar[::10]]) end= time.time() print (end-start)*1000.*10./len(apar) start= time.time() numpy.array([sdf_gap.meanOmega(x,approx=False,oned=True) for x in apar[::10]]) end= time.time() print (end-start)*1000.*10./len(apar) start= time.time() numpy.array([sdf_gap.meanOmega(x,approx=True,oned=True) for x in apar[::10]]) end= time.time() print (end-start)*1000.*10./len(apar) start= time.time() numpy.array([sdf_gap.meanOmega(x,approx=True,oned=True,higherorder=True) for x in apar[::10]]) end= time.time() print (end-start)*1000.*10./len(apar) ``` ## Test the multiple-impact approximations ``` # Setup a four, intermediate impacts m= [10.**7.,10.**7.25,10.**6.75,10.**7.5] GM= [mm/bovy_conversion.mass_in_msol(V0,R0) for mm in m] timpactIndx= [numpy.argmin(numpy.fabs(numpy.array(sdf_pepper._uniq_timpact)-1.3/bovy_conversion.time_in_Gyr(V0,R0))), numpy.argmin(numpy.fabs(numpy.array(sdf_pepper._uniq_timpact)-2.3/bovy_conversion.time_in_Gyr(V0,R0))), numpy.argmin(numpy.fabs(numpy.array(sdf_pepper._uniq_timpact)-3.3/bovy_conversion.time_in_Gyr(V0,R0))), numpy.argmin(numpy.fabs(numpy.array(sdf_pepper._uniq_timpact)-4.3/bovy_conversion.time_in_Gyr(V0,R0)))] sdf_pepper.set_impacts(impactb=[0.5*rs(m[0]),2.*rs(m[1]),1.*rs(m[2]),2.5*rs(m[3])], subhalovel=numpy.array([[-25.,155.,30.], [125.,35.,80.], [-225.,5.,-40.], [25.,-155.,37.]])/V0, impact_angle=[0.6,0.4,0.3,0.3], timpact=[sdf_pepper._uniq_timpact[ti] for ti in timpactIndx], GM=GM,rs=[rs(mm) for mm in m]) sdf_gap= sdf_pepper n= 100000 aa_mock_per= sdf_pepper.sample(n=n,returnaAdt=True) dO= numpy.dot(aa_mock_per[0].T-sdf_gap._progenitor_Omega, sdf_gap._sigomatrixEig[1][:,sdf_gap._sigomatrixEigsortIndx]) dO[:,2]*= sdf_gap._sigMeanSign da= numpy.dot(aa_mock_per[1].T-sdf_gap._progenitor_angle, sdf_gap._sigomatrixEig[1][:,sdf_gap._sigomatrixEigsortIndx]) da[:,2]*= sdf_gap._sigMeanSign num= True apar= numpy.arange(0.,sdf_smooth.length()+0.003,0.003) dens_unp= numpy.array([sdf_smooth._density_par(x) for x in apar]) dens_approx= numpy.array([sdf_gap.density_par(x,approx=True) for x in apar]) # normalize dens_unp= dens_unp/numpy.sum(dens_unp)/(apar[1]-apar[0]) dens_approx= dens_approx/numpy.sum(dens_approx)/(apar[1]-apar[0]) if num: dens_num= numpy.array([sdf_gap.density_par(x,approx=False) for x in apar]) dens_num= dens_num/numpy.sum(dens_num)/(apar[1]-apar[0]) bovy_plot.bovy_print(axes_labelsize=18.,xtick_labelsize=14.,ytick_labelsize=14.) figsize(6,7) axTop= pyplot.axes([0.15,0.3,0.825,0.65]) fig= pyplot.gcf() fig.sca(axTop) bovy_plot.bovy_plot(apar,dens_approx,lw=2.5,gcf=True, color='k', xrange=[0.,1.], yrange=[0.,2.24], ylabel=r'$\mathrm{density}$') plot(apar,dens_unp,lw=3.5,color='k',ls='--',zorder=0) nullfmt = NullFormatter() # no labels axTop.xaxis.set_major_formatter(nullfmt) dum= hist(da[:,2],bins=101,normed=True,range=[apar[0],apar[-1]], histtype='step',color='0.55',zorder=0,lw=3.) 
axBottom= pyplot.axes([0.15,0.1,0.825,0.2]) fig= pyplot.gcf() fig.sca(axBottom) if num: bovy_plot.bovy_plot(apar,100.*(dens_num-dens_approx)/dens_approx, lw=2.5,gcf=True,color='k', xrange=[0.,1.], yrange=[-1.45,1.45], zorder=2, xlabel=r'$\Delta \theta_\parallel$', ylabel=r'$\mathrm{relative\ difference\ in}\ \%$') # label if num: aparIndx= numpy.argmin(numpy.fabs(apar-0.6)) plot([0.45,apar[aparIndx]],[0.7,(100.*(dens_num-dens_approx)/dens_approx)[aparIndx]], 'k',lw=1.) bovy_plot.bovy_text(0.15,0.4,r'$\mathrm{numerical\ minus}$'+'\n'+r'$\mathrm{approximation}$',size=17.) if save_figures: bovy_plot.bovy_end_print(os.path.join(os.getenv('PAPERSDIR'),'2016-stream-stats','gd1likeMulti_densapprox.pdf')) mO_unp= numpy.array([sdf_smooth.meanOmega(x,oned=True) for x in apar])\ *bovy_conversion.freq_in_Gyr(V0,R0) mO_approx= numpy.array([sdf_gap.meanOmega(x,approx=True,oned=True) for x in apar])\ *bovy_conversion.freq_in_Gyr(V0,R0) if num: mO_num= numpy.array([sdf_gap.meanOmega(x,approx=False,oned=True) for x in apar])\ *bovy_conversion.freq_in_Gyr(V0,R0) frac= 0.005 alpha=0.01 linecolor='0.65' bovy_plot.bovy_print(axes_labelsize=18.,xtick_labelsize=14.,ytick_labelsize=14.) figsize(6,7) axTop= pyplot.axes([0.15,0.3,0.825,0.65]) fig= pyplot.gcf() fig.sca(axTop) bovy_plot.bovy_plot(apar,mO_approx,lw=2.5,gcf=True, color='k', xrange=[0.,1.], yrange=[0.,0.2], ylabel=r'$\Delta \Omega_\parallel\,(\mathrm{Gyr}^{-1})$') plot(apar,mO_unp,lw=2.5,color='k',ls='--') plot(da[::3,2],dO[::3,2]*bovy_conversion.freq_in_Gyr(V0,R0), 'k.',alpha=alpha*2,rasterized=True) nullfmt = NullFormatter() # no labels axTop.xaxis.set_major_formatter(nullfmt) axBottom= pyplot.axes([0.15,0.1,0.825,0.2]) fig= pyplot.gcf() fig.sca(axBottom) if num: bovy_plot.bovy_plot(apar,100.*(mO_num-mO_approx)/mO_approx, lw=2.5,gcf=True,color='k', xrange=[0.,1.],zorder=1, yrange=[-0.39,0.39], xlabel=r'$\Delta \theta_\parallel$', ylabel=r'$\mathrm{relative\ difference\ in\ \%}$') # label if num: aparIndx= numpy.argmin(numpy.fabs(apar-0.6)) plot([0.35,apar[aparIndx]],[0.2,(100.*(mO_num-mO_approx)/mO_approx)[aparIndx]], 'k',lw=1.) bovy_plot.bovy_text(0.05,0.1,r'$\mathrm{numerical\ minus}$'+'\n'+r'$\mathrm{approximation}$',size=17.) if save_figures: bovy_plot.bovy_end_print(os.path.join(os.getenv('PAPERSDIR'),'2016-stream-stats','gd1likeMulti_mOparapprox.pdf')) start= time.time() numpy.array([sdf_gap.density_par(x,approx=False) for x in apar[::10]]) end= time.time() print (end-start)*1000.*10./len(apar) start= time.time() numpy.array([sdf_gap.density_par(x,approx=True) for x in apar[::10]]) end= time.time() print (end-start)*1000.*10./len(apar) start= time.time() numpy.array([sdf_gap.meanOmega(x,approx=False,oned=True) for x in apar[::10]]) end= time.time() print (end-start)*1000.*10./len(apar) start= time.time() numpy.array([sdf_gap.meanOmega(x,approx=True,oned=True) for x in apar[::10]]) end= time.time() print (end-start)*1000.*10./len(apar) ``` ## Computational speed ``` nimp= 2**numpy.arange(1,9) ntrials= 3 nsample= [10,10,10,10,10,10,33,33,33] compt= numpy.zeros(len(nimp)) for ii,ni in enumerate(nimp): tcompt= 0. 
for t in range(ntrials): nimpact=ni timpacts= numpy.random.permutation(numpy.array(sdf_pepper._uniq_timpact))[:ni] print len(timpacts) impact_angles= numpy.array([\ sdf_pepper._icdf_stream_len[ti](numpy.random.uniform()) for ti in timpacts]) GMs= numpy.array([sample_GM() for a in impact_angles]) rss= numpy.array([sample_rs(gm) for gm in GMs]) impactbs= numpy.random.uniform(size=len(impact_angles))*Xrs*rss subhalovels= numpy.empty((len(impact_angles),3)) for jj in range(len(timpacts)): subhalovels[jj]=\ sdf_pepper._draw_impact_velocities(timpacts[jj],120./V0, impact_angles[jj],n=1)[0] # Flip angle sign if necessary if not sdf_pepper._gap_leading: impact_angles*= -1. # Setup sdf_pepper.set_impacts(impact_angle=impact_angles, impactb=impactbs, subhalovel=subhalovels, timpact=timpacts, GM=GMs,rs=rss) start= time.time() numpy.array([sdf_pepper.density_par(x,approx=True) for x in apar[::nsample[ii]]]) end= time.time() tcompt+= (end-start)*1000.*nsample[ii]/len(apar) compt[ii]= tcompt/ntrials bovy_plot.bovy_print(axes_labelsize=18.,xtick_labelsize=14.,ytick_labelsize=14.) figsize(6,4) bovy_plot.bovy_plot(numpy.log2(nimp),compt,'ko', semilogy=True, xrange=[0.,9.], yrange=[.5,100000.], ylabel=r'$\mathrm{time}\,(\mathrm{ms})$', xlabel=r'$\mathrm{number\ of\ impacts}$') p= numpy.polyfit(numpy.log10(nimp),numpy.log10(compt),deg=1) bovy_plot.bovy_plot(numpy.log2(nimp),10.**(p[0]*numpy.log10(nimp)+p[1]), '-',lw=2., color=(0.0, 0.4470588235294118, 0.6980392156862745), overplot=True) pyplot.text(0.3,0.075, r'$\log_{10}\ \mathrm{time/ms} = %.2f \,\log_{10} N %.2f$' % (p[0],p[1]), transform=pyplot.gca().transAxes,size=14.) # Use 100, 1000 instead of 10^2, 10^3 gca().yaxis.set_major_formatter(ScalarFormatter()) def twoto(x,pos): return r'$%i$' % (2**x) formatter = FuncFormatter(twoto) gca().xaxis.set_major_formatter(formatter) gcf().subplots_adjust(left=0.175,bottom=0.15,right=0.95,top=0.95) if save_figures: bovy_plot.bovy_end_print(os.path.join(os.getenv('PAPERSDIR'),'2016-stream-stats','gd1likeMulti_compTime.pdf')) ``` ## Example densities and tracks ### Single masses ``` # Load our fiducial simulation's output, for apars and smooth stream data= numpy.genfromtxt(os.path.join(os.getenv('DATADIR'),'streamgap-pepper','gd1_multtime', 'gd1_t64sampling_X5_5-9_dens.dat'), delimiter=',',max_rows=2) apars= data[0] dens_unp= data[1] data= numpy.genfromtxt(os.path.join(os.getenv('DATADIR'),'streamgap-pepper','gd1_multtime', 'gd1_t64sampling_X5_5-9_omega.dat'), delimiter=',',max_rows=2) omega_unp= data[1] dens_example= [] omega_example= [] # Perform some simulations, for different mass ranges numpy.random.seed(3) nexample= 4 masses= [5.5,6.5,7.5,8.5] for ii in range(nexample): # Sampling functions sample_GM= lambda: 10.**(masses[ii]-10.)\ /bovy_conversion.mass_in_1010msol(V0,R0) rate= dNencdm(sdf_pepper,10.**masses[ii],Xrs=Xrs, plummer=plummer) sdf_pepper.simulate(rate=rate,sample_GM=sample_GM,sample_rs=sample_rs,Xrs=Xrs) densOmega= numpy.array([sdf_pepper._densityAndOmega_par_approx(a) for a in apars]).T dens_example.append(densOmega[0]) omega_example.append(densOmega[1]) bovy_plot.bovy_print(axes_labelsize=18.,xtick_labelsize=14.,ytick_labelsize=18.) 
figsize(6,7) overplot= False for ii in range(nexample): bovy_plot.bovy_plot(apars,dens_example[ii]/dens_unp+2.*ii+0.5*(ii>2),lw=2.5, color='k', xrange=[0.,1.3], yrange=[0.,2.*nexample+1], xlabel=r'$\Delta \theta_\parallel$', ylabel=r'$\mathrm{density}/\mathrm{smooth\ density}+\mathrm{constant}$', overplot=overplot) plot(apars,apars*0.+1.+2.*ii+0.5*(ii>2),lw=1.5,color='k',ls='--',zorder=0) bovy_plot.bovy_text(1.025,1.+2.*ii+0.5*(ii>2),r'$10^{%.1f}\,M_\odot$' % masses[ii],verticalalignment='center',size=18.) overplot=True if save_figures: bovy_plot.bovy_end_print(os.path.join(os.getenv('PAPERSDIR'),'2016-stream-stats', 'gd1like_densexample_singlemasses.pdf')) bovy_plot.bovy_print(axes_labelsize=18.,xtick_labelsize=14.,ytick_labelsize=18.) figsize(6,7) overplot= False mult= [3.,3.,1.,1.] for ii in range(nexample): bovy_plot.bovy_plot(apars,mult[ii]*(omega_example[ii]/omega_unp-1.)+1.+2.*ii+0.5*(ii>2), lw=2.5, color='k', xrange=[0.,1.3], yrange=[0.,2.*nexample+1.], xlabel=r'$\Delta \theta_\parallel$', ylabel=r'$\langle\Delta \Omega_\parallel\rangle\big/\langle\Delta \Omega_\parallel^0\rangle+\mathrm{constant}$', overplot=overplot) plot(apars,apars*0.+1.+2.*ii+0.5*(ii>2),lw=1.5,color='k',ls='--',zorder=0) bovy_plot.bovy_text(1.025,1.+2.*ii+0.5*(ii>2),r'$10^{%.1f}\,M_\odot$' % masses[ii],verticalalignment='center',size=18.) bovy_plot.bovy_text(0.025,1.+2.*ii+0.1+0.5*(ii>2),r'$\times%i$' % mult[ii],size=18.) overplot= True if save_figures: bovy_plot.bovy_end_print(os.path.join(os.getenv('PAPERSDIR'),'2016-stream-stats', 'gd1like_omegaexample_singlemasses.pdf')) ``` ### Full mass range First look at low apar resolution: ``` apars= apars[::30] dens_unp= dens_unp[::30] omega_unp= omega_unp[::30] # Sampling functions massrange=[5.,9.] plummer= False Xrs= 5. nsubhalo= simulate_streampepper.nsubhalo rs= simulate_streampepper.rs dNencdm= simulate_streampepper.dNencdm sample_GM= lambda: (10.**((-0.5)*massrange[0])\ +(10.**((-0.5)*massrange[1])\ -10.**((-0.5)*massrange[0]))\ *numpy.random.uniform())**(1./(-0.5))\ /bovy_conversion.mass_in_msol(V0,R0) rate_range= numpy.arange(massrange[0]+0.5,massrange[1]+0.5,1) rate= numpy.sum([dNencdm(sdf_pepper,10.**r,Xrs=Xrs, plummer=plummer) for r in rate_range]) sample_rs= lambda x: rs(x*bovy_conversion.mass_in_1010msol(V0,R0)*10.**10., plummer=plummer) dens_example2= [] omega_example2= [] # Perform some simulations numpy.random.seed(3) nexample= 4 for ii in range(nexample): sdf_pepper.simulate(rate=rate,sample_GM=sample_GM,sample_rs=sample_rs,Xrs=Xrs) densOmega= numpy.array([sdf_pepper._densityAndOmega_par_approx(a) for a in apars]).T dens_example2.append(densOmega[0]) omega_example2.append(densOmega[1]) bovy_plot.bovy_print(axes_labelsize=18.,xtick_labelsize=14.,ytick_labelsize=18.) figsize(6,7) overplot= False for ii in range(nexample): bovy_plot.bovy_plot(apars,dens_example2[ii]/dens_unp+2.*ii,lw=2.5, color='k', xrange=[0.,1.], yrange=[0.,2.*nexample+1.], xlabel=r'$\Delta \theta_\parallel$', ylabel=r'$\mathrm{density}/\mathrm{smooth\ density}+\mathrm{constant}$', overplot=overplot) plot(apars,apars*0.+1.+2.*ii,lw=1.5,color='k',ls='--',zorder=0) overplot=True bovy_plot.bovy_print(axes_labelsize=18.,xtick_labelsize=14.,ytick_labelsize=18.) 
figsize(6,7) overplot= False for ii in range(nexample): bovy_plot.bovy_plot(apars,omega_example2[ii]/omega_unp+2.*ii,lw=2.5, color='k', xrange=[0.,1.], yrange=[0.,2.*nexample], xlabel=r'$\Delta \theta_\parallel$', ylabel=r'$\langle\Delta \Omega_\parallel\rangle\big/\langle\Delta \Omega_\parallel^0\rangle+\mathrm{constant}$', overplot=overplot) plot(apars,apars*0.+1.+2.*ii,lw=1.5,color='k',ls='--',zorder=0) overplot= True ``` At full apar resolution: ``` # Load our fiducial simulation's output, for apars and smooth stream data= numpy.genfromtxt(os.path.join(os.getenv('DATADIR'),'streamgap-pepper','gd1_multtime', 'gd1_t64sampling_X5_5-9_dens.dat'), delimiter=',',max_rows=2) apars= data[0] dens_unp= data[1] data= numpy.genfromtxt(os.path.join(os.getenv('DATADIR'),'streamgap-pepper','gd1_multtime', 'gd1_t64sampling_X5_5-9_omega.dat'), delimiter=',',max_rows=2) omega_unp= data[1] dens_example2= [] omega_example2= [] # Perform some simulations numpy.random.seed(3) nexample= 4 for ii in range(nexample): sdf_pepper.simulate(rate=rate,sample_GM=sample_GM,sample_rs=sample_rs,Xrs=Xrs) densOmega= numpy.array([sdf_pepper._densityAndOmega_par_approx(a) for a in apars]).T dens_example2.append(densOmega[0]) omega_example2.append(densOmega[1]) bovy_plot.bovy_print(axes_labelsize=18.,xtick_labelsize=14.,ytick_labelsize=18.) figsize(6,7) overplot= False for ii in range(nexample): bovy_plot.bovy_plot(apars,dens_example2[ii]/dens_unp+2.*ii,lw=2.5, color='k', xrange=[0.,1.], yrange=[0.,2.*nexample+1.], xlabel=r'$\Delta \theta_\parallel$', ylabel=r'$\mathrm{density}/\mathrm{smooth\ density}+\mathrm{constant}$', overplot=overplot) plot(apars,apars*0.+1.+2.*ii,lw=1.5,color='k',ls='--',zorder=0) overplot=True if save_figures: bovy_plot.bovy_end_print(os.path.join(os.getenv('PAPERSDIR'),'2016-stream-stats','gd1like_densexample.pdf')) bovy_plot.bovy_print(axes_labelsize=18.,xtick_labelsize=14.,ytick_labelsize=18.) figsize(6,7) overplot= False for ii in range(nexample): bovy_plot.bovy_plot(apars,omega_example2[ii]/omega_unp+2.*ii,lw=2.5, color='k', xrange=[0.,1.], yrange=[0.,2.*nexample], xlabel=r'$\Delta \theta_\parallel$', ylabel=r'$\langle\Delta \Omega_\parallel\rangle\big/\langle\Delta \Omega_\parallel^0\rangle+\mathrm{constant}$', overplot=overplot) plot(apars,apars*0.+1.+2.*ii,lw=1.5,color='k',ls='--',zorder=0) overplot= True if save_figures: bovy_plot.bovy_end_print(os.path.join(os.getenv('PAPERSDIR'),'2016-stream-stats','gd1like_omegaexample.pdf')) ```
# Chapter 3: Inferential statistics

[Link to outline](https://docs.google.com/document/d/1fwep23-95U-w1QMPU31nOvUnUXE2X3s_Dbk5JuLlKAY/edit#heading=h.uutryzqeo2av)

Concept map:
![concepts_STATS.png](attachment:09eb3a54-abf3-4e54-bf16-6a6399de6438.png)

#### Notebook setup

```
# loading Python modules
import math
import random
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.stats.distributions import norm

# set random seed for repeatability
np.random.seed(42)

# notebook figs setup
%matplotlib inline
import matplotlib.pyplot as plt
sns.set(rc={'figure.figsize':(8,5)})
blue, orange = sns.color_palette()[0], sns.color_palette()[1]

# silence annoying warnings
import warnings; warnings.filterwarnings('ignore')
```

## Overview

- Main idea = learn about a population based on a sample
- Recall Amy's two research questions about the employee lifetime value (ELV) data:
  - Question 1 = Is there a difference between ELV of the two groups? → **hypothesis testing**
  - Question 2 = How much difference in ELV does stats training provide? → **estimation**
- Inferential statistics provides us with tools to answer both of these questions

## Estimators

We'll begin our study of inferential statistics by introducing **estimators**, which are used for both **hypothesis testing** and **estimation**.

![high level stats for overview.png](attachment:8837c882-ebf7-4203-b625-c8f01f84a55b.png)

$\def\stderr#1{\mathbf{se}_{#1}}$
$\def\stderrhat#1{\hat{\mathbf{se}}_{#1}}$

### Definitions

- We use the term "estimator" to describe a function $f$ that takes samples as inputs, which is written mathematically as:
  $$ f \ \colon \underbrace{\mathcal{X}\times \mathcal{X}\times \cdots \times \mathcal{X}}_{n \textrm{ copies}} \quad \to \quad \mathbb{R}, $$
  where $n$ is the sample size and $\mathcal{X}$ denotes the possible values of the random variable $X$.
- We give different names to estimators, depending on the use case:
  - **statistic** = a function computed from samples (descriptive statistics)
  - **parameter estimators** = statistics that estimate population parameters
  - **test statistic** = an estimator used as part of a hypothesis testing procedure
- The **value** of the estimator $f(\mathbf{x})$ is computed from a particular sample $\mathbf{x}$.
- The **sampling distribution** of an estimator is the distribution of $f(\mathbf{X})$, where $\mathbf{X}$ is a random sample.
- Example of estimators we discussed in descriptive statistics: - Sample mean - estimator: $\overline{x} = g(\mathbf{x}) = \frac{1}{n}\sum_{i=1}^n x_i$ - gives an estimate for the population mean $\mu$ - sampling distribution: $\overline{X} = g(\mathbf{X}) = \frac{1}{n}\sum_{i=1}^n X_i$ - Sample variance - estimator: $s^2 = h(\mathbf{x}) = \frac{1}{n-1}\sum_{i=1}^n (x_i-\overline{x})^2$ - gives an estimate for the population variance $\sigma^2$ - sampling distribution: $S^2 = h(\mathbf{X}) = \frac{1}{n-1}\sum_{i=1}^n (X_i-\overline{X})^2$ - In this notebook we focus on one estimator: **difference between group means** - estimator: $d = \texttt{mean}(\mathbf{x}_A) - \texttt{mean}(\mathbf{x}_{B}) = \overline{x}_{A} - \overline{x}_{B}$ - gives an estimate for the difference between population means: $\Delta = \mu_A - \mu_{B}$ - sampling distribution: $D = \overline{X}_A - \overline{X}_{B}$, which is a random variable ### Difference between group means Consider two random variables $X_A$ and $X_B$: $$ \large X_A \sim \mathcal{N}\!\left(\mu_A, \sigma^2_A \right) \qquad \textrm{and} \qquad X_B \sim \mathcal{N}\!\left(\mu_B, \sigma^2_B \right) $$ that describe the probability distribution for groups A and B, respectively. - A sample of size $n_A$ from $X_A$ is denoted $\mathbf{x}_A = x_1x_2\cdots x_{n_A}$=`xA`, and let $\mathbf{x}_B = x_1x_2\cdots x_{n_B}$=`xB` be a random sample of size $n_B$ from $X_B$. - We compute the mean in each group: $\overline{x}_{A} = \texttt{mean}(\mathbf{x}_A)$ and $\overline{x}_{B} = \texttt{mean}(\mathbf{x}_B)$ - The value of the estimator is $d = \overline{x}_{A} - \overline{x}_{B}$ ``` def dmeans(xA, xB): """ Estimator for the difference between group means. """ d = np.mean(xA) - np.mean(xB) return d ``` Note the difference between group means is precisely the estimator Amy need for her analysis (**Group S** and **Group NS**). We intentionally use the labels **A** and **B** to illustrate the general case. ``` # example parameters for each group muA, sigmaA = 300, 10 muB, sigmaB = 200, 20 # size of samples for each group nA = 5 nB = 4 ``` #### Particular value of the estimator `dmeans` ``` xA = norm(muA, sigmaA).rvs(nA) # random sample from Group A xB = norm(muB, sigmaB).rvs(nB) # random sample from Group B d = dmeans(xA, xB) d ``` The value of $d$ computed from the samples is an estimate for the difference between means of two groups: $\Delta = \mu_A - \mu_{B}$ (which we know is $100$ in this example). #### Sampling distribution of the estimator `dmeans` How well does the estimate $d$ approximate the true value $\Delta$? **What is the accuracy and variability of the estimates we can expect?** To answer these questions, consider the random samples $\mathbf{X}_A = X_1X_2\cdots X_{n_A}$ and $\mathbf{X}_B = X_1X_2\cdots X_{n_B}$, then compute the **sampling distribution**: $D = \overline{X}_A - \overline{X}_{B}$. By definition, the sampling distribution of the estimator is obtained by repeatedly generating samples `xA` and `xB` from the two distributions and computing `dmeans` on the random samples. For example, we can obtain the sampling distribution by generating $N=1000$ samples. ``` def get_sampling_dist(statfunc, meanA, stdA, nA, meanB, stdB, nB, N=1000): """ Obtain the sampling distribution of the statistic `statfunc` from `N` random samples drawn from groups A and B with parmeters: - Group A: `nA` values taken from `norm(meanA, stdA)` - Group B: `nB` values taken from `norm(meanB, stdB)` Returns a list of samples from the sampling distribution of `statfunc`. 
""" sampling_dist = [] for i in range(0, N): xA = norm(meanA, stdA).rvs(nA) # random sample from Group A xB = norm(meanB, stdB).rvs(nB) # random sample from Group B stat = statfunc(xA, xB) # evaluate `statfunc` sampling_dist.append(stat) # record the value of statfunc return sampling_dist # Generate the sampling distirbution for dmeans dmeans_sdist = get_sampling_dist(statfunc=dmeans, meanA=muA, stdA=sigmaA, nA=nA, meanB=muB, stdB=sigmaB, nB=nB) print("Generated", len(dmeans_sdist), "values from `dmeans(XA, XB)`") # first 3 values dmeans_sdist[0:3] ``` #### Plot the sampling distribution of `dmeans` ``` fig3, ax3 = plt.subplots() title3 = "Samping distribution of D = mean($\mathbf{X}_A$) - mean($\mathbf{X}_B$) " + \ "for samples of size $n_A$ = " + str(nA) + \ " from $\mathcal{N}$(" + str(muA) + "," + str(sigmaA) + ")" + \ " and $n_B$ = " + str(nB) + \ " from $\mathcal{N}$(" + str(muB) + "," + str(sigmaB) + ")" sns.distplot(dmeans_sdist, kde=False, norm_hist=True, ax=ax3) _ = ax3.set_title(title3) ``` #### Theoretical model for the sampling distribution of `dmeans` Let's use probability theory to build a theoretical model for the sampling distribution of the difference-between-means estimator `dmeans`. - The central limit theorem the rules of to obtain a model for the random variable $D = \overline{X}_A - \overline{X}_{B}$, which describes the sampling distribution of `dmeans`. - The central limit theorem tells us the sample mean within the two group are $$ \large \overline{X}_A \sim \mathcal{N}\!\left(\mu_A, \tfrac{\sigma^2_A}{n_A} \right) \qquad \textrm{and} \qquad \overline{X}_B \sim \mathcal{N}\!\left(\mu_B, \tfrac{\sigma^2_B}{n_B} \right) $$ - The rules of probability theory tells us that the [difference of two normal random variables](https://en.wikipedia.org/wiki/Sum_of_normally_distributed_random_variables#Independent_random_variables) requires subtracting their means and adding their variance, so we get: $$ \large D \sim \mathcal{N}\!\left(\mu_A - \mu_B, \ \tfrac{\sigma^2_A}{n_A} + \tfrac{\sigma^2_B}{n_B} \right) $$ In other words, the sampling distribution for the difference of means estimator has mean and standard deviation given by: $$ \large \mu_D = \mu_A - \mu_B \qquad \textrm{and} \qquad \sigma_D = \sqrt{ \tfrac{\sigma^2_A}{n_A} + \tfrac{\sigma^2_B}{n_B} } $$ Let's plot the theoretical prediction on top of the simulated data to see if they are a good fit. ``` Dmean = muA - muB Dstd = np.sqrt(sigmaA**2/nA + sigmaB**2/nB) print("Probability theory predicts the sampling distribution had" "mean", round(Dmean, 3), "and standard deviation", round(Dstd, 3)) x = np.linspace(min(dmeans_sdist), max(dmeans_sdist), 10000) D = norm(Dmean, Dstd).pdf(x) label = 'Theory prediction' ax3 = sns.lineplot(x, D, ax=ax3, label=label, color=blue) fig3 ``` ### Regroup and reality check How are you doing, dear readers? I know this was a lot of math and a lot of code, but the good news is we're done now! The key things to remember is that we have two ways to compute sampling distribution for any estimator: - Repeatedly generate random samples from model and compute the estimator values (histogram) - Use probability theory to obtain a analytical formula #### Why are we doing all this modelling? The estimator `dmeans` we defined above measures the quantity we're interested in: the difference between the means of two groups (**Group S** and **Group NS** in Amy's statistical analysis of ELV data). 
Using the functions we developed above, we now have the ability to simulate the data from any two groups by simply choosing the appropriate parameters. In particular, if we choose `stdS=266`, `nS=30`; and `stdNS=233`, `nNS=31`, we can generate random data that has similar variability to Amy's ELV measurements.

Okay, dear reader, we're about to jump into the deep end of the statistics pool: **hypothesis testing**, which is one of the two major ideas in the STATS 101 curriculum. Heads up: this will get complicated, but we have to go into it because it is an essential procedure that is used widely in science, engineering, business, and other types of research. You need to trust me on this one: it's worth knowing this stuff, even if it is boring. Don't worry about it though, since you have all the prerequisites needed to get through this!

____

Recall Amy's research Question 1: Is there a difference between ELV of the employees in **Group S** and the employees in **Group NS**?

## Hypothesis testing

- An approach to formulating research questions as **yes-no decisions** and a **procedure for making these decisions**
- Hypothesis testing is a standardized procedure for doing statistical analysis (also, using stats jargon makes everything look more convincing ;)
- We formulate the research question as two **competing hypotheses**:
  - **Null hypothesis $H_0$** = no effect. In our example: "no difference between means," which is written as $\color{red}{\mu_S = \mu_{NS} = \mu_0}$. In other words, the probability models for the two groups are:
    $$ \large H_0: \qquad X_S = \mathcal{N}(\color{red}{\mu_0}, \sigma_S) \quad \textrm{and} \quad X_{NS} = \mathcal{N}(\color{red}{\mu_0}, \sigma_{NS}) \quad $$
  - **Alternative hypothesis $H_A$** = an effect exists. In our example: "the mean for Group S is different from the mean for Group NS", $\color{blue}{\mu_S} \neq \color{orange}{\mu_{NS}}$. The probability models for the two groups are:
    $$ H_A: \qquad X_S = \mathcal{N}(\color{blue}{\mu_S}, \sigma_S) \quad \textrm{and} \quad X_{NS} = \mathcal{N}(\color{orange}{\mu_{NS}}, \sigma_{NS}) $$
- The purpose of hypothesis testing is to perform a basic sanity-check to show the difference between the group means we observed ($d = \overline{x}_{S} - \overline{x}_{NS} = 130$) is **unlikely to have occurred by chance**
- NEW CONCEPT: the $p$-value is the probability of observing $d$ or a more extreme value under the null hypothesis.

### Overview of the hypothesis testing procedure

Here is the high-level overview of the hypothesis testing procedure:
- **inputs**: sample statistics computed from the observed data (in our case the signal $\overline{x}_S$, $\overline{x}_{NS}$, and our estimates of the noise $s^2_S$, and $s^2_{NS}$)
- **outputs**: a decision that is one of: "reject the null hypothesis" or "fail to reject the null hypothesis"

![hypothesis testing for overview.png](attachment:f1abf698-e8fb-4844-aeb8-58df5352b68f.png)

We'll now look at two different approaches for computing the sampling distribution of the difference between group means statistic, $D = \overline{X}_S - \overline{X}_{NS}$: permutation tests and analytical approximations.

### Interpreting the results of hypothesis testing (optional)

- The implication of rejecting the null hypothesis (no difference) is that there must be a difference between the group means. In other words, the ELV data for employees who took the statistics training (**Group S**) is different from the average ELV for employees who didn't take the statistics training (**Group NS**), which is what Amy is trying to show.
- Note that rejecting the null hypothesis (H0) is not the same as "proving" the alternative hypothesis (HA); we have just shown that the data is unlikely under the null hypothesis, so there must be *some* difference between the groups, and it is worth looking for *some* alternative hypothesis.
- The alternative hypothesis we picked above, $\mu_S \neq \mu_{NS}$, is just a placeholder that includes the desirable effect: $\mu_S > \mu_{NS}$ (stats training improves ELV), but also includes the opposite effect: $\mu_S < \mu_{NS}$ (stats training decreases ELV).
- Using statistics jargon, when we reject the hypothesis H0 we say we've observed a "statistically significant" result, which sounds a lot more impressive than it actually is. Recall the hypothesis test is just used to rule out "occurred by chance," which is a very basic sanity check.
- The implication of failing to reject the null hypothesis is that the observed difference between means is "not significant," meaning it could have occurred by chance, so there is no need to search for an alternative hypothesis.
- Note that "failing to reject" is not the same as "proving" the null hypothesis.
- Note also that "failing to reject H0" doesn't mean we reject HA. In fact, the alternative hypothesis didn't play any role in the calculations whatsoever.

I know all this sounds super complicated and roundabout (and it is!), but you will get the hang of it in no time with some practice. Trust me, you need to know this shit.

### Start by loading the data again...

First things first, let's reload the data we prepared back in the DATA notebook, where we left off ([01_DATA.ipynb](./01_DATA.ipynb)).

```
df = pd.read_csv('data/employee_lifetime_values.csv')
df

# remember the descriptive statistics
df.groupby("group").describe()

def dmeans(sample):
    """
    Compute the difference between group means.
    """
    xS = sample[sample["group"]=="S"]["ELV"]
    xNS = sample[sample["group"]=="NS"]["ELV"]
    d = np.mean(xS) - np.mean(xNS)
    return d

# the observed value in Amy's data
dmeans(df)
```

Our goal is to determine how likely or unlikely this observed value is under the null hypothesis $H_0$. In the next two sections, we'll look at two different approaches for obtaining the sampling distribution of $D$ under $H_0$.

## Approach 1: Permutation test for hypothesis testing

- The permutation test allows us to reject $H_0$ using the existing sample $\mathbf{x}$ that we have, treating the sample as if it were a population.
- Relevant probability distributions:
  - Sampling distribution = obtained from repeated samples from a hypothetical population under $H_0$.
  - Approximate sampling distribution: obtained by **resampling data from the single sample we have**.
- Recall Goal 1: make sure the data cannot be explained by $H_0$ (observed difference due to natural variability)
- We want to obtain an approximation of the sampling distribution under $H_0$
- The $H_0$ probability model describes a hypothetical scenario with **no difference between groups**, which means data from **Group S** and **Group NS** comes from the same distribution.
- To generate a new random sample $\mathbf{x}^p$ from the $H_0$ model we can reuse the sample $\mathbf{x}$ we have obtained, but randomly mix up the group labels. Since under the $H_0$ model the **S** and **NS** populations are identical, mixing up the labels should have no effect.
- The math term for "mixing up" is **permutation**, meaning each value in the input is randomly reassigned to a new random place in the output.
``` def resample_under_H0(sample, groupcol="group"): """ Return a copy of the dataframe `sample` with the labels in the column `groupcol` modified based on a random permutation of the values in the original sample. """ resample = sample.copy() labels = sample[groupcol].values newlabels = np.random.permutation(labels) resample[groupcol] = newlabels return resample resample_under_H0(df) # resample resample = resample_under_H0(df) # compute the difference in means for the new labels dmeans(resample) ``` The steps in the above code cell give us a simple way to generate samples from the null hypothesis and compute the value of `dmeans` statistic for these samples. We used the assumption of "no difference" under the null hypothesis, and translated this to the "forget the labels" interpretation. #### Running a permutation test We can repeat the resampling procedure `10000` times to get the sampling distribution of $D$ under $H_0$, as illustrated in the code procedure below. ``` def permutation_test(sample, statfunc, groupcol="group", permutations=10000): """ Compute the p-value of the observed `statfunc(sample)` under the null hypothesis where the labels in the `groupcol` are randomized. """ # 1. compute the observed value of the statistic for the sample obsstat = statfunc(sample) # 2. generate the sampling distr. using random permutations of the group labels resampled_stats = [] for i in range(0, permutations): resample = resample_under_H0(sample, groupcol=groupcol) restat = statfunc(resample) resampled_stats.append(restat) # 3. compute p-value: how many `restat`s are equal-or-more-extreme than `obsstat` tailstats = [restat for restat in resampled_stats \ if restat <= -abs(obsstat) or restat >= abs(obsstat)] pvalue = len(tailstats) / len(resampled_stats) return resampled_stats, pvalue sampling_dist, pvalue = permutation_test(df, statfunc=dmeans) # plot the sampling distribution in blue sns.displot(sampling_dist, bins=200) # plot red line for the observed statistic obsstat = dmeans(df) plt.axvline(obsstat, color='r') # plot the values that are equal or more extreme in red tailstats = [rs for rs in sampling_dist if rs <= -obsstat or rs >= obsstat] _ = sns.histplot(tailstats, bins=200, color="red") ``` - Once we have the sampling distribution of `D` under $H_0$, we can see where the observed value $d=130$ falls within this distribution. - p-value: the probability of observing value $d$ or more extreme under the null hypothesis ``` pvalue ``` We can now make the decision based on the $p$-value and a pre-determined threshold: - If the observed value $d$ is unlikely under $H_0$ ($p$-value less than 5% chance of occurring), then our decision will be to "reject the null hypothesis." - Otherwise, if the observed value $d$ is not that unusual ($p$-value greater than 5%), we conclude that we have "failed to reject the null hypothesis." ``` if pvalue < 0.05: print("DECISION: Reject H0", "( p-value =", pvalue, ")") print(" There is a statistically significant difference between xS and xNS means") else: print("DECISION: Fail to reject H0") print(" The difference between groups means could have occurred by chance") ``` #### Permutations test using SciPy The above code was given only for illustrative purposes. In practice, you can use the SciPy implementation of permutation test, by calling `ttest_ind(..., permutations=10000)` to perform a permutation test, then obtain the $p$-value. 
```
from scipy.stats import ttest_ind

xS = df[df["group"]=="S"]["ELV"]
xNS = df[df["group"]=="NS"]["ELV"]

ttest_ind(xS, xNS, permutations=10000).pvalue
```

#### Discussion

- The procedure we used is called a **permutation test** for the comparison of group means.
- The permutation test takes its name from the action of mixing up the group-membership labels and computing the statistic, which is a way to generate samples from the null hypothesis when we're comparing two groups.
- Permutation tests are very versatile since we can use them with any estimator $h(\mathbf{x})$. For example, we could have used the difference in medians by specifying the `median` as the input `statfunc`.

## Approach 2: Analytical approximations for hypothesis testing

We'll now look at another approach for answering Question 1: using an analytical approximation, which is the approach normally taught in STATS 101 courses.

How likely or unlikely is the observed difference $d=130$ under the null hypothesis?

- Analytical approximations are math models for describing the sampling distribution under $H_0$
  - Sampling distributions = obtained by repeated sampling from $H_0$
  - Analytical approximation = probability distribution model based on estimated parameters
- Assumption: the population is normally distributed
- Based on this assumption we can use the theoretical model we developed above for the difference between group means to obtain a **closed form expression** for the sampling distribution of $D$
- In particular, the probability models for the two groups under $H_0$ are:
  $$ \large H_0: \qquad X_S = \mathcal{N}(\color{red}{\mu_0}, \sigma_S) \quad \textrm{and} \quad X_{NS} = \mathcal{N}(\color{red}{\mu_0}, \sigma_{NS}), \quad $$
  from which we can derive the model for $D = \overline{X}_S - \overline{X}_{NS}$:
  $$ \large D \sim \mathcal{N}\!\left( \color{red}{0}, \ \tfrac{\sigma^2_S}{n_S} + \tfrac{\sigma^2_{NS}}{n_{NS}} \right) $$
  In words, the sampling distribution of the difference between group means is normally distributed with mean $\mu_D = 0$ and variance $\sigma^2_D$ that depends on the variances of the two groups, $\sigma^2_S$ and $\sigma^2_{NS}$. Recall we obtained this expression earlier when we discussed the difference of means between groups A and B.
- However, the population variances $\sigma^2_S$ and $\sigma^2_{NS}$ are unknown, and we only have the estimated variances $s_S^2$ and $s_{NS}^2$ calculated from the sample.
- That's OK though, since the sample variances are good approximations to the population variances. There are two common ways to obtain an approximation for $\sigma^2_D$:
  - Pooled variance: $\sigma^2_D \approx s^2_p = \frac{(n_S-1)s_S^2 \; + \; (n_{NS}-1)s_{NS}^2}{n_S + n_{NS} - 2}$ (takes advantage of the assumption that both samples come from the same population under $H_0$)
  - Unpooled variance: $\sigma^2_D \approx s^2_u = \tfrac{s^2_S}{n_S} + \tfrac{s^2_{NS}}{n_{NS}}$ (follows from the general rules of probability theory)
- NEW CONCEPT: **Student's $t$-distribution** is a model for $D$ which takes into account that we are using $s_S^2$ and $s_{NS}^2$ instead of $\sigma_S^2$ and $\sigma_{NS}^2$.
- NEW CONCEPT: **degrees of freedom**, denoted `dof` in code or $\nu$ (Greek letter *nu*) in equations, is the parameter of Student's $t$-distribution related to the sample size used to estimate these quantities.

### Student's t-test (pooled variance)

[Student's t-test for comparison of the difference between group means](https://statkat.com/stattest.php?&t=9) is a procedure that makes use of the pooled variance $s^2_p$.
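For reference, the quantities this procedure computes can be summarized in equation form (these formulas simply restate the pooled-variance expressions above and match the step-by-step computation shown under the hood below):

$$
s^2_p = \frac{(n_S-1)s_S^2 + (n_{NS}-1)s_{NS}^2}{n_S + n_{NS} - 2},
\qquad
\stderrhat{D} = \sqrt{ \tfrac{s^2_p}{n_S} + \tfrac{s^2_p}{n_{NS}} },
\qquad
t = \frac{d}{\stderrhat{D}},
\qquad
\nu = n_S + n_{NS} - 2,
$$

and the two-sided $p$-value is $p = 2\,F_{T_\nu}\!\left(-|t|\right)$, where $F_{T_\nu}$ is the CDF of Student's $t$-distribution with $\nu$ degrees of freedom.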
#### Black-box approach The `scipy.stats` function `ttest_ind` will perform all the steps of the $t$-test procedure, without the need for us to understand the details. ``` from scipy.stats import ttest_ind # extract data for two groups xS = df[df["group"]=="S"]['ELV'] xNS = df[df["group"]=="NS"]['ELV'] # run the complete t-test procedure for ind-ependent samples: result = ttest_ind(xS, xNS) result.pvalue ``` The $p$-value is less than 0.05 so our decision is to **reject the null hypothesis**. #### Student's t-test under the hood The computations hidden behind the function `ttest_ind` involve a six step procedure that makes use of the pooled variance $s^2_p$. ``` from statistics import stdev from scipy.stats.distributions import t # 1. calculate the mean in each group meanS, meanNS = np.mean(xS), np.mean(xNS) # 2. calculate d, the observed difference between means d = meanS - meanNS # 3. calculate the standard deviations in each group stdS, stdNS = stdev(xS), stdev(xNS) nS, nNS = len(xS), len(xNS) # 4. compute the pooled variance and standard error var_pooled = ((nS-1)*stdS**2 + (nNS-1)*stdNS**2)/(nS + nNS - 2) std_pooled = np.sqrt(var_pooled) std_err = np.sqrt(std_pooled**2/nS + std_pooled**2/nNS) # 5. compute the value of the t-statistic tstat = d / std_err # 6. obtain the p-value for the t-statistic from a # t-distribution with 31+30-2 = 59 degrees of freedom dof = nS + nNS - 2 pvalue = 2 * t(dof).cdf(-abs(tstat)) # 2* because two-sided pvalue ``` #### Welch's t-test (unpooled variances) An [alternative t-test procedure](https://statkat.com/stattest.php?&t=9) that doesn't assume the variances in groups are equal. ``` result2 = ttest_ind(xS, xNS, equal_var=False) result2.pvalue ``` Welch's $t$-test differs only in steps 4 through 6 as shown below: ``` # 4'. compute the unpooled standard deviation of D stdD = np.sqrt(stdS**2/nS + stdNS**2/nNS) # 5'. compute the value of the t-statistic tstat = d / stdD # 6'. obtain the p-value from a t-distribution with # (insert crazy formula here) degrees of freedom dof = (stdS**2/nS + stdNS**2/nNS)**2 / \ ((stdS**2/nS)**2/(nS-1) + (stdNS**2/nNS)**2/(nNS-1) ) pvalue = 2 * t(dof).cdf(-abs(tstat)) # 2* because two-sided pvalue ``` ### Summary of Question 1 We saw two ways to answer Question 1 (is there a difference between group means) and obtain the p-value. We interpreted the small p-values as evidence that the observed difference, $d=130$, is unlikely to be due to chance, i.e. we rejected the null hypothesis. Note this whole procedure is just a sanity check—we haven't touched the alternative hypothesis at all yet, and for all we know the stats training could have the effect of decreasing ELV! ____ It's time to study Question 2, which is to estimate the magnitude of the change in ELV obtained from completing the stats training, which is called *effect size* in statistics. ## Estimating the effect size - Question 2 of statistical analysis is to estimate the difference in ELV gained by stats training. - NEW CONCEPT: **effect size** is a measure of difference between intervention and control groups. - We assume the data of **Group S** and **Group NS** come from different populations with means $\mu_S$ and $\mu_{NS}$ - We're interested in the difference between population means, denoted $\Delta = \mu_S - \mu_{NS}$. - By analyzing the sample, we have obtained an estimate $d=130$ for the unknown $\Delta$, but we know our data contains lots of variability, so we know our estimate might be off. 
- We want an answer to Question 2 (What is the estimated difference between group means?) that takes into account the variability of the data. - NEW CONCEPT: **confidence interval** is a way to describe a range of values for an estimate - We want to provide an answer to Question 2 in the form of a confidence interval that tells us a range of values where we believe the true value of $\Delta$ falls. - Similar to how we showed to approaches for hypothesis testing, we'll work on effect size estimation using two approaches: resampling methods and analytical approximations. ### Approach 1: estimate the effect size using bootstrap method - We want to estimate the distribution of ELV values for the two groups, and compute the difference between the means of these distributions. - Distributions: - Sampling distributions = obtained by repeated sampling from the populations - Bootstrap sampling distributions = resampling data from the samples we have (with replacement) - Intuition: treat the samples as if they were the population - We'll compute $B=5000$ bootstrap samples from the two groups and compute the difference, then look at the distribution of the bootstrap sample difference to obtain $CI_{\Delta}$, the confidence interval for the difference between population means. ``` from statistics import mean def bootstrap_stat(sample, statfunc=mean, B=5000): """ Compute the bootstrap estimate of the function `statfunc` from the sample. Returns a list of statistic values from bootstrap samples. """ n = len(sample) bstats = [] for i in range(0, B): bsample = np.random.choice(sample, n, replace=True) bstat = statfunc(bsample) bstats.append(bstat) return bstats # load data for two groups df = pd.read_csv('data/employee_lifetime_values.csv') xS = df[df["group"]=="S"]['ELV'] xNS = df[df["group"]=="NS"]['ELV'] # compute bootstrap estimates for mean in each group meanS_bstats = bootstrap_stat(xS, statfunc=mean) meanNS_bstats = bootstrap_stat(xNS, statfunc=mean) # compute the difference between means from bootstrap samples dmeans_bstats = [] for bmeanS, bmeanNS in zip(meanS_bstats, meanNS_bstats): d = bmeanS - bmeanNS dmeans_bstats.append(d) sns.displot(dmeans_bstats) # 90% confidence interval for the difference in means CI_boot = [np.percentile(dmeans_bstats, 5), np.percentile(dmeans_bstats, 95)] CI_boot ``` #### SciPy bootstrap method ``` from scipy.stats import bootstrap def dmeans2(sample1, sample2): return np.mean(sample1) - np.mean(sample2) res = bootstrap((xS, xNS), statistic=dmeans2, vectorized=False, confidence_level=0.9, n_resamples=5000, method='percentile') CI_boot2 = [res.confidence_interval.low, res.confidence_interval.high] CI_boot2 ``` ### Approach 2: Estimates using analytical approximation method - Assumption 1: populations for **Group S** and **Group NS** are normally distributed - Assumption 2: the variance of the two populations is the same (or approximately equal) - Using the theoretical model for the populations, we can obtain a formula for CI of effect size $\Delta$: $$ \textrm{CI}_{(1-\alpha)} = \left[ d - t^*\!\cdot\!\sigma_D, \, d + t^*\!\cdot\!\sigma_D \right]. $$ The confidence interval is centred at $d$, with width proportional to the standard deviation $\sigma_D$. The constant $t^*$ denotes the value of the inverse CDF of Student's $t$-distribution with appropriate number of degrees of freedom `dof` evaluated at $1-\frac{\alpha}{2}$. 
For a 90% confidence interval, we choose $\alpha=0.10$, which gives $(1-\frac{\alpha}{2}) = 0.95$, $t^* = F_{T_{\textrm{dof}}}^{-1}\left(0.95\right)$. - We can use the two different analytical approximations to obtain a formula for $\sigma_D$ just as we did in the hypothesis testing: - Pooled variance: $\sigma^2_p = \frac{(n_S-1)s_S^2 + (n_{NS}-1)s_{NS}^2}{n_S + n_{NS} - 2}$, and `dof` = $n_S + n_{NS} -2$ - Unpooled variance: $\sigma^2_u = \tfrac{s^2_A}{n_A} + \tfrac{s^2_B}{n_B}$, and `dof` = [...](https://en.wikipedia.org/wiki/Student%27s_t-test#Equal_or_unequal_sample_sizes,_unequal_variances_(sX1_%3E_2sX2_or_sX2_%3E_2sX1)) #### Using pooled variance The calculations are similar to Student's t-test for hypothesis testing. ``` from scipy.stats.distributions import t d = np.mean(xS) - np.mean(xNS) nS, nNS = len(xS), len(xNS) stdS, stdNS = stdev(xS), stdev(xNS) var_pooled = ((nS-1)*stdS**2 + (nNS-1)*stdNS**2)/(nS + nNS - 2) std_pooled = np.sqrt(var_pooled) std_err = std_pooled * np.sqrt(1/nS + 1/nNS) dof = nS + nNS - 2 # for 90% confidence interval, need 10% in tails alpha = 0.10 # now use inverse-CDF of Students t-distribution tstar = abs(t(dof).ppf(alpha/2)) CI_tpooled = [d - tstar*std_err, d + tstar*std_err] CI_tpooled ``` #### Using unpooled variance The calculations are similar to the Welch's t-test for hypothesis testing. ``` d = np.mean(xS) - np.mean(xNS) nS, nNS = len(xS), len(xNS) stdS, stdNS = stdev(xS), stdev(xNS) stdD = np.sqrt(stdS**2/nS + stdNS**2/nNS) dof = (stdS**2/nS + stdNS**2/nNS)**2 / \ ((stdS**2/nS)**2/(nS-1) + (stdNS**2/nNS)**2/(nNS-1) ) # for 90% confidence interval, need 10% in tails alpha = 0.10 # now use inverse-CDF of Students t-distribution tstar = abs(t(dof).ppf(alpha/2)) CI_tunpooled = [d - tstar*stdD, d + tstar*stdD] CI_tunpooled ``` #### Summary of Question 2 results We now have all the information we need to give a precise and nuanced answer to Question 2: "How big is the increase in ELV produced by stats training?". The basic estimate of the difference is $130$ can be reported, and additionally can can report the 90% confidence interval for the difference between group means, that takes into account the variability in the data we have observed. Note the CIs obtained using different approaches are all similar (+/- 5 ELV points), so it doesn't matter much which approach we use: ``` CI_boot, CI_boot2, CI_tpooled, CI_tunpooled ``` ### Standardized effect size (optional) It is sometimes useful to report the effect size using a "standardized" measure for effect sizes. *Cohen's $d$* one such measure, and it is defined as the difference between two means divided by the pooled standard deviation. ``` def cohend(sample1, sample2): """ Compute Cohen's d measure of effect size for two independent samples. """ n1, n2 = len(sample1), len(sample2) mean1, mean2 = np.mean(sample1), np.mean(sample2) var1, var2 = np.var(sample1, ddof=1), np.var(sample2, ddof=1) # calculate the pooled variance and standard deviaiton var_pooled = ((n1-1)*var1 + (n2-1)*var2) / (n1 + n2 - 2) std_pooled = np.sqrt(var_pooled) # compute Cohen's d cohend = (mean1 - mean2) / std_pooled return cohend cohend(xS, xNS) ``` We can interpret the value of Cohen's d obtained using the [reference table](https://en.wikipedia.org/wiki/Effect_size#Cohen's_d) of values: | Cohen's d | Effect size | | ----------- | ----------- | | 0.01 | very small | | 0.20 | small | | 0.50 | medium | | 0.80 | large | We can therefore say the effect size of offering statistics training for employees has an **medium** effect size. 
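In equation form, the standardized effect size computed by the `cohend` function above is

$$
d_{\textrm{Cohen}} = \frac{\overline{x}_S - \overline{x}_{NS}}{s_p},
\qquad \textrm{where} \qquad
s_p = \sqrt{ \frac{(n_S-1)s_S^2 + (n_{NS}-1)s_{NS}^2}{n_S + n_{NS} - 2} }.
$$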
## Conclusion of Amy's statistical analysis

Recall the two research questions that Amy set out to answer in the beginning of this video series:
- Question 1: Is there a difference between the means in the two groups?
- Question 2: How much does statistics training improve the ELV of employees?

The statistical analysis we did allows us to answer these two questions as follows:
- Answer 1: There is a statistically significant difference between Group S and Group NS, p = 0.048.
- Answer 2: The estimated improvement in ELV is 130 points, which corresponds to a Cohen's d value of 0.52 (medium effect size). A 90% confidence interval for the true effect size is [25.9, 234.2].

Note: we used the numerical results obtained from resampling methods (Approach 1), but the conclusions would be qualitatively the same if we reported the results obtained from analytical approximations (Approach 2).

### Using statistics for convincing others

You may be wondering if all this probabilistic modelling and complicated statistical analysis was worth it to reach a conclusion that seems obvious in retrospect. Was all this work worth it? The purpose of all this work is to obtain something close to an objective conclusion. Without statistics it is very easy to fool ourselves and interpret patterns in data the way we want to, or alternatively, not see patterns that are present. By following the standard statistical procedures, we're less likely to fool ourselves, and more likely to be able to convince others.

It can be very useful to imagine Amy explaining the results to a skeptical colleague. Suppose the colleague is very much against the idea of statistical training, and sees it as a distraction, saying things like "We hire employees to do a job, not to play with Python." and "I don't know any statistics and I'm doing my job just fine!" You get the picture.

Imagine Amy presenting her findings about how 100 hours of statistical training improves employee lifetime value (ELV) results after one year, and suggesting the statistical training be implemented for all new hires from now on. The skeptical colleague immediately rejects the idea and questions Amy's recommendation using emotional arguments about necessity, time wasting, and how statistics is a specialty topic that is not required for all employees. Instead of arguing based on opinions and emotions with her colleague, Amy explains that her recommendation is based on a statistical experiment she conducted, and shows the results.

- When the colleague asks if the observed difference could be due to chance, Amy says that this is unlikely, quotes the p-value of 0.048 (less than 0.05), and interprets the result as saying the probability that the observed difference between **Group S** and **Group NS** is due to chance is less than 5%.
- The skeptical colleague is forced to concede that statistical training does improve ELV, but then asks about the effect size of the improvement: "How much more ELV can we expect if we provide statistics training?" Amy is ready to answer, quoting the observed difference of $130$ ELV points, and further specifies the 90% confidence interval of [25.9, 234.2] for the improvement, meaning that even in the worst case there is an improvement of about 25 ELV points.

The skeptic is forced to back down from their objections, and the "stats training for all" program is adopted in the company. Not only was Amy able to win the argument using statistics, but she was also able to set appropriate expectations for the results.
In other words, she hasn't promised a guaranteed +130 ELV improvement, but a realistic range of values that can be expected.

## Comparison of resampling methods and analytical approximations

In this notebook we saw two different approaches for doing statistical analysis: resampling methods and analytical approximations. This is a general pattern in statistics where there is not only one correct answer: multiple approaches to data analysis are valid, and you need to think about the specifics of each data analysis situation. You'll learn about both approaches in the book.

Analytical approximations are currently taught in most stats courses (STATS 101). Historically, analytical approximations have been used more widely because they require only simple arithmetic calculations: statistics practitioners (scientists, engineers, etc.) simply need to compute sample statistics, plug them into a formula, and obtain a $p$-value. This convenience comes at the cost of numerous assumptions about the data distribution, which often don't hold in practice (e.g. assuming the population is normal when it isn't).

In recent years, resampling methods like the permutation test and bootstrap estimation have become more popular and widely used in industry, and are increasingly also taught to university students (*modern statistics*). **The main advantage of resampling methods is that they require fewer modelling assumptions.** Procedures like the permutation test can be applied broadly to any scenario where two groups are compared, and don't require developing specific formulas for different cases. Resampling methods are easier to understand since the statistical procedures they require are directly related to the sampling distribution, and there are no formulas to memorize. Understanding resampling methods requires some basic familiarity with programming, but the skills required are not advanced: knowledge of variables, expressions, and a basic `for` loop is sufficient. If you were able to follow the code examples described above (see `resample_under_H0`, `permutation_test`, and `bootstrap_stat`), then you've already **seen all the code you will need for the entire book!**

## Other statistics topics in the book

The goal of this notebook was to focus on the two main ideas of inferential statistics ([Chapter 3](https://docs.google.com/document/d/1fwep23-95U-w1QMPU31nOvUnUXE2X3s_Dbk5JuLlKAY/edit#heading=h.uutryzqeo2av)): hypothesis testing and estimation. We didn't have time to cover many of the other important topics in statistics, which will be covered in the book (and in future notebooks). Here is a list of some of these topics:

- Null Hypothesis Significance Testing (NHST) procedure in full detail (Type I and Type II error, power, sample size calculations)
- Statistical assumptions behind analytical approximations
- Cookbook of statistical analysis recipes (analytical approximations for different scenarios)
- Experimental design (how to plan and conduct statistical experiments)
- Misuses of statistics (caveats to watch out for and mistakes to avoid)
- Bayesian statistics (a very deep topic; we'll cover only the main ideas)
- Practice problems and exercises (real knowledge is when you can do the calculations yourself)

___

So far our statistical analysis was limited to comparing two groups, which is referred to as a **categorical predictor variable** in stats jargon. In the next notebook we'll learn about statistical analysis with **continuous predictor variables**: instead of comparing stats vs.
no-stats, we analyze what happens when a variable amount of stats training is provided (a continuous predictor variable). Open the notebook [04_LINEAR_MODELS.ipynb](./04_LINEAR_MODELS.ipynb) when you're ready to continue.

```
code = list(["um"])
```
## Compare built-in Sagemaker classification algorithms for a binary classification problem using Iris dataset In the notebook tutorial, we build 3 classification models using HPO and then compare the AUC on test dataset on 3 deployed models IRIS is perhaps the best known database to be found in the pattern recognition literature. Fisher's paper is a classic in the field and is referenced frequently to this day. (See Duda & Hart, for example.) The data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant. The dataset is built-in by default into R or can also be downloaded from https://archive.ics.uci.edu/ml/datasets/iris The iris dataset, besides its historical importance, is also a fun dataset to play with since it can educate us about various ML techniques such as clustering, classification and regression, all in one dataset. The dataset is built into any base R installation, so no download is required. Attribute Information: 1. sepal length in cm 2. sepal width in cm 3. petal length in cm 4. petal width in cm 5. Species of flowers: Iris setosa, Iris versicolor, Iris virginica The prediction we will perform is `Species ~ f(sepal.length,sepal.width,petal.width,petal.length)` Predicted attribute: Species of iris plant. ### Load required libraries and initialize variables. ``` rm(list=ls()) library(reticulate) # be careful not to install reticulate again. since it can cause problems. library(tidyverse) library(pROC) set.seed(1324) ``` SageMaker needs to be imported using the reticulate library. If this was performed in a local computer, we would have to make sure that Python and appropriate SageMaker libraries are installed, but inside a SageMaker notebook R kernels, these are all pre-loaded and the R user does not have to worry about installing reticulate or Python. Session is the unique session ID associated with each SageMaker call. It remains the same throughout the execution of the program and can be recalled later to close a session or open a new session. The bucket is the Amazon S3 bucket where we will be storing our data output. The Amazon S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the Notebook Instance, training, and hosting. The role is the role of the SageMaker notebook as when it was initially deployed. The IAM role arn used to give training and hosting access to your data. See the documentation for how to create these. Note, if more than one role is required for notebook instances, training, and/or hosting, please replace the boto regexp with appropriate full IAM role arn string(s). ``` sagemaker <- import('sagemaker') session <- sagemaker$Session() bucket <- session$default_bucket() # you may replace with name of your personal S3 bucket role_arn <- sagemaker$get_execution_role() ``` ### Input the data and basic pre-processing ``` head(iris) summary(iris) ``` In above, we see that there are 50 flowers of the setosa species, 50 flowers of the versicolor species, and 50 flowers of the virginica species. In this case, the target variable is the Species prediction. We are trying to predict the species of the flower given its numerical measurements of Sepal length, sepal width, petal length, and petal width. Since we are trying to do binary classification, we will only take the flower species setosa and versicolor for simplicity. Also we will perform one-hot encoding on the categorical variable Species. 
``` iris1 <- iris %>% dplyr::select(Species,Sepal.Length,Sepal.Width,Petal.Length,Petal.Width) %>% # change order of columns such that the label column is the first column. dplyr::filter(Species %in% c("setosa","versicolor")) %>% #only select two flower for binary classification. dplyr::mutate(Species = as.numeric(Species) -1) # one-hot encoding,starting with 0 as setosa and 1 as versicolor. head(iris1) ``` We now obtain some basic descriptive statistics of the features. ``` iris1 %>% group_by(Species) %>% summarize(mean_sepal_length = mean(Sepal.Length), mean_petal_length = mean(Petal.Length), mean_sepal_width = mean(Sepal.Width), mean_petal_width = mean(Petal.Width), ) ``` In the summary statistics, we observe that mean sepal length is longer than mean petal length for both flowers. ### Prepare for modelling ##### We split the train and test and validate into 70%, 15%, and 15%, using random sampling. ``` iris_train <- iris1 %>% sample_frac(size = 0.7) iris_test <- anti_join(iris1, iris_train) %>% sample_frac(size = 0.5) iris_validate <- anti_join(iris1, iris_train) %>% anti_join(., iris_test) ``` ##### We do a check of the summary statistics to make sure train, test, validate datasets are appropriately split and have proper class balance. ``` table(iris_train$Species) nrow(iris_train) ``` We see that the class balance between 0 and 1 is almost 50% each for the binary classification. We also see that there are 70 rows in the train dataset. ``` table(iris_validate$Species) nrow(iris_validate) ``` We see that the class balance in validation dataset between 0 and 1 is almost 50% each for the binary classification. We also see that there are 15 rows in the validation dataset. ``` table(iris_test$Species) nrow(iris_test) ``` We see that the class balance in test dataset between 0 and 1 is almost 50% each for the binary classification. We also see that there are 15 rows in the test dataset. ### Write the data to Amazon S3 Different algorithms in SageMaker will have different data formats required for training and for testing. These formats are created to make model production easier. csv is the most well known of these formats and has been used here as input in all algorithms to make it consistent. SageMaker algorithms take in data from an Amazon S3 object and output data to an Amazon S3 object, so data has to be stored in Amazon S3 as csv,json, proto-buf or any format that is supported by the algorithm that you are going to use. 
``` write_csv(iris_train, 'iris_train.csv', col_names = FALSE) write_csv(iris_validate, 'iris_valid.csv', col_names = FALSE) write_csv(iris_test, 'iris_test.csv', col_names = FALSE) s3_train <- session$upload_data(path = 'iris_train.csv', bucket = bucket, key_prefix = 'data') s3_valid <- session$upload_data(path = 'iris_valid.csv', bucket = bucket, key_prefix = 'data') s3_test <- session$upload_data(path = 'iris_test.csv', bucket = bucket, key_prefix = 'data') s3_train_input <- sagemaker$inputs$TrainingInput(s3_data = s3_train, content_type = 'text/csv') s3_valid_input <- sagemaker$inputs$TrainingInput(s3_data = s3_valid, content_type = 'text/csv') s3_test_input <- sagemaker$inputs$TrainingInput(s3_data = s3_test, content_type = 'text/csv') ``` To perform Binary classification on Tabular data, SageMaker contains following algorithms: - XGBoost Algorithm - Linear Learner Algorithm, - K-Nearest Neighbors (k-NN) Algorithm, ## Create model 1: XGBoost model in SageMaker Use the XGBoost built-in algorithm to build an XGBoost training container as shown in the following code example. You can automatically spot the XGBoost built-in algorithm image URI using the SageMaker image_uris.retrieve API (or the get_image_uri API if using Amazon SageMaker Python SDK version 1). If you want to ensure if the image_uris.retrieve API finds the correct URI, see Common parameters for built-in algorithms and look up XGBoost from the full list of built-in algorithm image URIs and available regions. After specifying the XGBoost image URI, you can use the XGBoost container to construct an estimator using the SageMaker Estimator API and initiate a training job. This XGBoost built-in algorithm mode does not incorporate your own XGBoost training script and runs directly on the input datasets. See https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost.html for more information. ``` container <- sagemaker$image_uris$retrieve(framework='xgboost', region= session$boto_region_name, version='latest') cat('XGBoost Container Image URL: ', container) s3_output <- paste0('s3://', bucket, '/output_xgboost') estimator1 <- sagemaker$estimator$Estimator(image_uri = container, role = role_arn, train_instance_count = 1L, train_instance_type = 'ml.m5.4xlarge', input_mode = 'File', output_path = s3_output, output_kms_key = NULL, base_job_name = NULL, sagemaker_session = NULL) ``` How would an untuned model perform compared to a tuned model? Is it worth the effort? Before going deeper into XGBoost model tuning, let’s highlight the reasons why you have to tune your model. The main reason to perform hyper-parameter tuning is to increase predictability of our models by choosing our hyperparameters in a well thought manner. There are 3 ways to perform hyperparameter tuning: grid search, random search, bayesian search. Popular packages like scikit-learn use grid search and random search techniques. SageMaker uses Bayesian search techniques. We need to choose - a learning objective function to optimize during model training - an eval_metric to use to evaluate model performance during validation - a set of hyperparameters and a range of values for each to use when tuning the model automatically SageMaker XGBoost model can be tuned with many hyperparameters. The hyperparameters that have the greatest effect on optimizing the XGBoost evaluation metrics are: - alpha, - min_child_weight, - subsample, - eta, - num_round. 
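To build some intuition for what these hyperparameters control before handing the search over to SageMaker, a quick local sketch is possible with the xgboost R package (an assumption: that package is not used elsewhere in this notebook and would need to be installed); the SageMaker tuning workflow continues below.

```
# Illustrative only: fit the training split locally with two values of eta to see how
# the learning rate changes the per-round training AUC. Not part of the SageMaker workflow.
library(xgboost)
dtrain <- xgb.DMatrix(data = as.matrix(iris_train[, -1]), label = iris_train$Species)
for (eta_val in c(0.1, 0.5)) {
  fit <- xgb.train(params = list(objective = "binary:logistic",
                                 eval_metric = "auc", eta = eta_val, max_depth = 3),
                   data = dtrain, nrounds = 6,
                   watchlist = list(train = dtrain), verbose = 0)
  cat("eta =", eta_val, "\n")
  print(fit$evaluation_log)  # one row of training AUC per boosting round
}
```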
The hyperparameters that are required are num_class (the number of classes if it is a multi-class classification problem) and num_round ( the number of rounds to run the training on). All other hyperparameters are optional and will be set to default values if it is not specified by the user. ``` # check to make sure which are required and which are optional estimator1$set_hyperparameters(eval_metric='auc', objective='binary:logistic', num_round = 6L ) # Set Hyperparameter Ranges, check to make sure which are integer and which are continuos parameters. hyperparameter_ranges = list('eta' = sagemaker$parameter$ContinuousParameter(0,1), 'min_child_weight'= sagemaker$parameter$ContinuousParameter(0,10), 'alpha'= sagemaker$parameter$ContinuousParameter(0,2), 'max_depth'= sagemaker$parameter$IntegerParameter(0L,10L)) ``` The evaluation metric that we will use for our binary classification purpose is validation:auc, but you could use any other metric that is right for your problem. You do have to be careful to change your objective_type to point to the right direction of Maximize or Minimize according to the objective metric you have chosen. ``` # Create a hyperparamter tuner objective_metric_name = 'validation:auc' tuner1 <- sagemaker$tuner$HyperparameterTuner(estimator1, objective_metric_name, hyperparameter_ranges, objective_type='Maximize', max_jobs=4L, max_parallel_jobs=2L) # Define the data channels for train and validation datasets input_data <- list('train' = s3_train_input, 'validation' = s3_valid_input) # train the tuner tuner1$fit(inputs = input_data, job_name = paste('tune-xgb', format(Sys.time(), '%Y%m%d-%H-%M-%S'), sep = '-'), wait=TRUE) ``` The output of the tuning job can be checked in SageMaker if needed. ### Calculate AUC for the test data on model 1 SageMaker will automatically recognize the training job with the best evaluation metric and load the hyperparameters associated with that training job when we deploy the model. One of the benefits of SageMaker is that we can easily deploy models in a different instance than the instance in which the notebook is running. So we can deploy into a more powerful instance or a less powerful instance. ``` model_endpoint1 <- tuner1$deploy(initial_instance_count = 1L, instance_type = 'ml.t2.medium') ``` The serializer tells SageMaker what format the model expects data to be input in. ``` model_endpoint1$serializer <- sagemaker$serializers$CSVSerializer(content_type='text/csv') ``` We input the `iris_test` dataset without the labels into the model using the `predict` function and check its AUC value. ``` # Prepare the test sample for input into the model test_sample <- as.matrix(iris_test[-1]) dimnames(test_sample)[[2]] <- NULL # Predict using the deployed model predictions_ep <- model_endpoint1$predict(test_sample) predictions_ep <- stringr::str_split(predictions_ep, pattern = ',', simplify = TRUE) predictions_ep <- as.numeric(predictions_ep > 0.5) # Add the predictions to the test dataset. iris_predictions_ep1 <- dplyr::bind_cols(predicted_flower = predictions_ep, iris_test) iris_predictions_ep1 # Get the AUC auc(roc(iris_predictions_ep1$predicted_flower,iris_test$Species)) ``` ## Create model 2: Linear Learner in SageMaker Linear models are supervised learning algorithms used for solving either classification or regression problems. For input, you give the model labeled examples (x, y). x is a high-dimensional vector and y is a numeric label. For binary classification problems, the label must be either 0 or 1. 
The linear learner algorithm requires a data matrix, with rows representing the observations, and columns representing the dimensions of the features. It also requires an additional column that contains the labels that match the data points. At a minimum, Amazon SageMaker linear learner requires you to specify input and output data locations, and objective type (classification or regression) as arguments. The feature dimension is also required. You can specify additional parameters in the HyperParameters string map of the request body. These parameters control the optimization procedure, or specifics of the objective function that you train on. For example, the number of epochs, regularization, and loss type. See https://docs.aws.amazon.com/sagemaker/latest/dg/linear-learner.html for more information. ``` container <- sagemaker$image_uris$retrieve(framework='linear-learner', region= session$boto_region_name, version='latest') cat('Linear Learner Container Image URL: ', container) s3_output <- paste0('s3://', bucket, '/output_glm') estimator2 <- sagemaker$estimator$Estimator(image_uri = container, role = role_arn, train_instance_count = 1L, train_instance_type = 'ml.m5.4xlarge', input_mode = 'File', output_path = s3_output, output_kms_key = NULL, base_job_name = NULL, sagemaker_session = NULL) ``` For the text/csv input type, the first column is assumed to be the label, which is the target variable for prediction. predictor_type is the only hyperparameter that is required to be pre-defined for tuning. The rest are optional. Normalization, or feature scaling, is an important preprocessing step for certain loss functions that ensures the model being trained on a dataset does not become dominated by the weight of a single feature. Decision trees do not require normalization of their inputs; and since XGBoost is essentially an ensemble algorithm comprised of decision trees, it does not require normalization for the inputs either. However, Generalized Linear Models require a normalization of their input. The Amazon SageMaker Linear Learner algorithm has a normalization option to assist with this preprocessing step. If normalization is turned on, the algorithm first goes over a small sample of the data to learn the mean value and standard deviation for each feature and for the label. Each of the features in the full dataset is then shifted to have mean of zero and scaled to have a unit standard deviation. To make our job easier, we do not have to go back to our previous steps to do normalization. Normalization is built in as a hyper-parameter in SageMaker Linear learner algorithm. So no need to worry about normalization for the training portions. ``` estimator2$set_hyperparameters(predictor_type="binary_classifier", normalize_data = TRUE) ``` The tunable hyperparameters for linear learner are: - wd - l1 - learning_rate - mini_batch_size - use_bias - positive_example_weight_mult Be careful to check which parameters are integers and which parameters are continuous because that is one of the common sources of errors. Also be careful to give a proper range for hyperparameters that makes sense for your problem. Training jobs can sometimes fail if the mini-batch size is too big compared to the training data available. 
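Since the training split here is quite small, it is worth checking the upper end of any mini_batch_size range against the number of training rows before launching the tuning jobs; a quick check (the ranges themselves are set in the next cell):

```
# The mini_batch_size search range used below should stay under the number of
# training rows (70 in this split) so that training jobs do not fail.
nrow(iris_train)
```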
``` # Set Hyperparameter Ranges hyperparameter_ranges = list('wd' = sagemaker$parameter$ContinuousParameter(0.00001,1), 'l1' = sagemaker$parameter$ContinuousParameter(0.00001,1), 'learning_rate' = sagemaker$parameter$ContinuousParameter(0.00001,1), 'mini_batch_size' = sagemaker$parameter$IntegerParameter(10L, 50L) ) ``` The evaluation metric we will be using in our case to compare the models will be the objective loss and is based on the validation dataset. ``` # Create a hyperparamter tuner objective_metric_name = 'validation:objective_loss' tuner2 <- sagemaker$tuner$HyperparameterTuner(estimator2, objective_metric_name, hyperparameter_ranges, objective_type='Minimize', max_jobs=4L, max_parallel_jobs=2L) # Create a tuning job name job_name <- paste('tune-linear', format(Sys.time(), '%Y%m%d-%H-%M-%S'), sep = '-') # Define the data channels for train and validation datasets input_data <- list('train' = s3_train_input, 'validation' = s3_valid_input) # Train the tuner tuner2$fit(inputs = input_data, job_name = job_name, wait=TRUE, content_type='csv') # since we are using csv files as input into the model, we need to specify content type as csv. ``` ### Calculate AUC for the test data on model 2 ``` # Deploy the model into an instance of your choosing. model_endpoint2 <- tuner2$deploy(initial_instance_count = 1L, instance_type = 'ml.t2.medium') ``` For inference, the linear learner algorithm supports the application/json, application/x-recordio-protobuf, and text/csv formats. For more information, https://docs.aws.amazon.com/sagemaker/latest/dg/LL-in-formats.html ``` # Specify what data formats you want the input and output of your model to look like. model_endpoint2$serializer <- sagemaker$serializers$CSVSerializer(content_type='text/csv') model_endpoint2$deserializer <- sagemaker$deserializers$JSONDeserializer() ``` In Linear Learner the output inference files are in JSON or RecordIO formats. https://docs.aws.amazon.com/sagemaker/latest/dg/LL-in-formats.html When you make predictions on new data, the contents of the response data depends on the type of model you choose within Linear Learner. For regression (predictor_type='regressor'), the score is the prediction produced by the model. For classification (predictor_type='binary_classifier' or predictor_type='multiclass_classifier'), the model returns a score and also a predicted_label. The predicted_label is the class predicted by the model and the score measures the strength of that prediction. So, for binary classification, predicted_label is 0 or 1, and score is a single floating point number that indicates how strongly the algorithm believes that the label should be 1. To interpret the score in classification problems, you have to consider the loss function used. If the loss hyperparameter value is logistic for binary classification or softmax_loss for multiclass classification, then the score can be interpreted as the probability of the corresponding class. These are the loss values used by the linear learner when the `loss` hyperparameter is set to auto as default value. But if the `loss` is set to `hinge_loss`, then the score cannot be interpreted as a probability. This is because hinge loss corresponds to a Support Vector Classifier, which does not produce probability estimates. In the current example, since our loss hyperparameter is logistic for binary classification, we can interpret it as probability of the corresponding class. 
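To make that probability interpretation concrete, here is a small, self-contained illustration (the linear score value of 1.2 is made up for the example):

```
# With logistic loss, a raw linear score z = w'x + b is mapped to P(label = 1)
# by the sigmoid (logistic) function.
sigmoid <- function(z) 1 / (1 + exp(-z))
sigmoid(1.2)  # roughly 0.77, read as a 77% probability that the flower is versicolor
```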
``` # Prepare the test data for input into the model test_sample <- as.matrix(iris_test[-1]) dimnames(test_sample)[[2]] <- NULL # Predict using the test data on the deployed model predictions_ep <- model_endpoint2$predict(test_sample) # Add the predictions to the test dataset. df <- data.frame(matrix(unlist(predictions_ep$predictions), nrow=length(predictions_ep$predictions), byrow=TRUE)) df <- df %>% dplyr::rename(score = X1, predicted_label = X2) iris_predictions_ep2 <- dplyr::bind_cols(predicted_flower = df$predicted_label, iris_test) iris_predictions_ep2 # Get the AUC auc(roc(iris_predictions_ep2$predicted_flower,iris_test$Species)) ``` ## Create model 3: KNN in SageMaker Amazon SageMaker k-nearest neighbors (k-NN) algorithm is an index-based algorithm. It uses a non-parametric method for classification or regression. For classification problems, the algorithm queries the k points that are closest to the sample point and returns the most frequently used label of their class as the predicted label. For regression problems, the algorithm queries the k closest points to the sample point and returns the average of their feature values as the predicted value. Training with the k-NN algorithm has three steps: sampling, dimension reduction, and index building. Sampling reduces the size of the initial dataset so that it fits into memory. For dimension reduction, the algorithm decreases the feature dimension of the data to reduce the footprint of the k-NN model in memory and inference latency. We provide two methods of dimension reduction methods: random projection and the fast Johnson-Lindenstrauss transform. Typically, you use dimension reduction for high-dimensional (d >1000) datasets to avoid the “curse of dimensionality” that troubles the statistical analysis of data that becomes sparse as dimensionality increases. The main objective of k-NN's training is to construct the index. The index enables efficient lookups of distances between points whose values or class labels have not yet been determined and the k nearest points to use for inference. See https://docs.aws.amazon.com/sagemaker/latest/dg/k-nearest-neighbors.html for more information. ``` container <- sagemaker$image_uris$retrieve(framework='knn', region= session$boto_region_name, version='latest') cat('KNN Container Image URL: ', container) s3_output <- paste0('s3://', bucket, '/output_knn') estimator3 <- sagemaker$estimator$Estimator(image_uri = container, role = role_arn, train_instance_count = 1L, train_instance_type = 'ml.m5.4xlarge', input_mode = 'File', output_path = s3_output, output_kms_key = NULL, base_job_name = NULL, sagemaker_session = NULL) ``` Hyperparameter `dimension_reduction_target` should not be set when `dimension_reduction_type` is set to its default value, which is `None`. If 'dimension_reduction_target' is set to a certain number without setting `dimension_reduction_type`, then SageMaker will ask us to remove 'dimension_reduction_target' from the specified hyperparameters and try again. In this tutorial, we are not performing dimensionality reduction, since we only have 4 features; so `dimension_reduction_type` is set to its default value of `None`. 
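Although no dimensionality reduction is needed for these 4 features, the idea behind the random projection option can be sketched locally in a few lines (illustrative only, not part of the SageMaker workflow):

```
# Project the 4 iris features onto 2 random directions -- the same idea SageMaker k-NN
# applies at much larger scale when dimension reduction is enabled.
set.seed(42)
proj <- matrix(rnorm(4 * 2), nrow = 4)
reduced <- as.matrix(iris_train[, -1]) %*% proj
dim(reduced)  # 70 rows, now described by 2 projected features
```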
```
estimator3$set_hyperparameters(
    feature_dim = 4L,
    sample_size = 10L,
    predictor_type = "classifier"
)
```

The Amazon SageMaker k-nearest neighbors model can be tuned with the following hyperparameters:
- k
- sample_size

```
# Set Hyperparameter Ranges
hyperparameter_ranges = list('k' = sagemaker$parameter$IntegerParameter(1L,10L))

# Create a hyperparameter tuner
objective_metric_name = 'test:accuracy'
tuner3 <- sagemaker$tuner$HyperparameterTuner(estimator3,
                                              objective_metric_name,
                                              hyperparameter_ranges,
                                              objective_type='Maximize',
                                              max_jobs=2L,
                                              max_parallel_jobs=2L)

# Create a tuning job name
job_name <- paste('tune-knn', format(Sys.time(), '%Y%m%d-%H-%M-%S'), sep = '-')

# Define the data channels for train and validation datasets
input_data <- list('train' = s3_train_input,
                   'test' = s3_valid_input  # k-NN requires a test channel; tuning does not work without it.
                  )

# train the tuner
tuner3$fit(inputs = input_data, job_name = job_name, wait=TRUE, content_type='text/csv;label_size=0')
```

### Calculate AUC for the test data on model 3

```
# Deploy the model into an instance of your choosing.
model_endpoint3 <- tuner3$deploy(initial_instance_count = 1L, instance_type = 'ml.t2.medium')
```

For inference, we again send CSV input to the endpoint and read back a JSON response, so the serializer and deserializer are set accordingly.

```
# Specify what data formats you want the input and output of your model to look like.
model_endpoint3$serializer <- sagemaker$serializers$CSVSerializer(content_type='text/csv')
model_endpoint3$deserializer <- sagemaker$deserializers$JSONDeserializer()
```

In k-NN, the input formats for inference are:
- CSV
- JSON
- JSONLINES
- RECORDIO

The output formats for inference are:
- JSON
- JSONLINES
- Verbose JSON
- Verbose RecordIO-ProtoBuf

Notice that there is no CSV output format for inference. See https://docs.aws.amazon.com/sagemaker/latest/dg/kNN-inference-formats.html for more details.

When you make predictions on new data with the k-NN classifier, the response contains a predicted_label for each input row: here 0 (setosa) or 1 (versicolor), the most common label among the k nearest training points. The verbose output formats additionally return the distances to, and labels of, those neighbours. Unlike the Linear Learner response, there is no probability-like score to interpret.
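Before scoring the SageMaker endpoint, a quick local sanity check of the k-NN idea is possible with the `class` package that ships with most R installations (illustrative only, separate from the SageMaker workflow):

```
# Local k-NN on the same split: classify the test rows from their 3 nearest
# training neighbours and tabulate against the true labels.
library(class)
local_pred <- knn(train = iris_train[, -1], test = iris_test[, -1],
                  cl = factor(iris_train$Species), k = 3)
table(predicted = local_pred, actual = iris_test$Species)
```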
```
# Prepare the test data for input into the model
test_sample <- as.matrix(iris_test[-1])
dimnames(test_sample)[[2]] <- NULL

# Predict using the test data on the deployed model
predictions_ep <- model_endpoint3$predict(test_sample)
```

We see that the output is a deserialized JSON structure (an R list).

```
predictions_ep
typeof(predictions_ep)

# Add the predictions to the test dataset.
df = data.frame(predicted_flower = unlist(predictions_ep$predictions))
iris_predictions_ep3 <- dplyr::bind_cols(predicted_flower = df$predicted_flower, iris_test)
iris_predictions_ep3

# Get the AUC
auc(roc(iris_predictions_ep3$predicted_flower,iris_test$Species))
```

## Compare the AUC of the 3 models on the test data

- AUC of SageMaker XGBoost = 1
- AUC of SageMaker Linear Learner = 0.83
- AUC of SageMaker KNN = 1

Based on the AUC metric (the higher the better), both XGBoost and KNN perform equally well and are better than the Linear Learner. We can also explore the 3 models with other binary classification metrics such as accuracy, F1 score, and misclassification error.

Comparing only the AUC in this example, we could choose either the XGBoost model or the KNN model to move into production and close the other two. The chosen deployed model can then be used in production to generate predictions of the flower species given only its sepal and petal measurements. The performance of the deployed model can also be tracked in Amazon CloudWatch.

## Clean up

##### We close the endpoints which we created to free up resources.

```
model_endpoint1$delete_model()
model_endpoint2$delete_model()
model_endpoint3$delete_model()

session$delete_endpoint(model_endpoint1$endpoint)
session$delete_endpoint(model_endpoint2$endpoint)
session$delete_endpoint(model_endpoint3$endpoint)
```
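The comparison above mentions accuracy, F1 score, and misclassification error; because the predictions were stored locally, these can still be computed after the endpoints have been deleted. A sketch for the XGBoost predictions, assuming both classes appear among the predicted labels:

```
# Additional binary classification metrics from the locally stored XGBoost predictions.
tab <- table(predicted = iris_predictions_ep1$predicted_flower,
             actual = iris_predictions_ep1$Species)
accuracy  <- sum(diag(tab)) / sum(tab)
precision <- tab["1", "1"] / sum(tab["1", ])
recall    <- tab["1", "1"] / sum(tab[, "1"])
c(accuracy = accuracy,
  f1 = 2 * precision * recall / (precision + recall),
  misclassification = 1 - accuracy)
```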
github_jupyter
## This Notebook - Goals - FOR EDINA **What?:** - Standard classification method example/tutorial **Who?:** - Researchers in ML - Students in computer science - Teachers in ML/STEM **Why?:** - Demonstrate capability/simplicity of core scipy stack. - Demonstrate common ML concept known to learners and used by researchers. **Noteable features to exploit:** - use of pre-installed libraries: <code>numpy</code>, <code>scikit-learn</code>, <code>matplotlib</code> **How?:** - clear to understand - minimise assumed knowledge - clear visualisations - concise explanations - recognisable/familiar - use standard methods - Effective use of core libraries <hr> # Classification - K nearest neighbours K nearest neighbours is a simple and effective way to deal with classification problems. This method classifies each sample based on the class of the points that are closest to it. This is a supervised learning method, meaning that data used contains information on some feature that the model should predict. This notebook shows the process of classifying handwritten digits. <hr> ### Import libraries On Noteable, all the libaries required for this notebook are pre-installed, so they simply need to be imported: ``` import numpy as np import sklearn.datasets as ds import sklearn.model_selection as ms from sklearn import decomposition from sklearn import neighbors from sklearn import metrics import matplotlib.pyplot as plt %matplotlib inline ``` <hr> # Data - Handwritten Digits In terms of data, [scikit-learn](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html) has a loading function for some data regarding hand written digits. ``` # get the digits data from scikit into the notebook digits = ds.load_digits() ``` The cell above loads the data as a [bunch object](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html), meaning that the data (in this case images of handwritten digits) and the target (the number that is written) can be split by accessing the attributes of the bunch object: ``` # store data and targets seperately X = digits.data y = digits.target print("The data is of the shape", X.shape) print("The target data is of the shape", y.shape) ``` The individual samples in the <code>X</code> array each represent an image. In this representation, 64 numbers are used to represent a greyscale value on an 8\*8 square. The images can be examined by using pyplot's [matshow](https://matplotlib.org/3.3.0/api/_as_gen/matplotlib.pyplot.matshow.html) function. The next cell displays the 17th sample in the dataset as an 8\*8 image. ``` # create figure to display the 17th sample fig = plt.matshow(digits.images[17], cmap=plt.cm.gray) fig.axes.get_xaxis().set_visible(False) fig.axes.get_yaxis().set_visible(False) ``` Suppose instead of viewing the 17th sample, we want to see the average of samples corresponding to a certain value. 
This can be done as follows (using 0 as an example): - All samples where the target value is 0 are located - The mean of these samples is taken - The resulting 64 long array is reshaped to be 8\*8 (for display) - The image is displayed ``` # take samples with target=0 izeros = np.where(y == 0) # take average across samples, reshape to visualise zeros = np.mean(X[izeros], axis=0).reshape(8,8) # display fig = plt.matshow(zeros, cmap=plt.cm.gray) fig.axes.get_xaxis().set_visible(False) fig.axes.get_yaxis().set_visible(False) ``` <hr> # Fit and test the model ## Split the data Now that you have an understanding of the data, the model can be fitted. Fitting the model involves setting some of the data aside for testing, and allowing the model to "see" the target values corresponding to the training samples. Once the model has been fitted to the training data, the model will be tested on some data it has not seen before. The next cell uses [train_test_split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) to shuffle all data, then set some data aside for testing later. For this example, $\frac{1}{4}$ of the data will be set aside for testing, and the model will be trained on the remaining training set. As before, <code>X</code> corresponds to data samples, and <code>y</code> corresponds to labels. ``` # split data to train and test sets X_train, X_test, y_train, y_test = \ ms.train_test_split(X, y, test_size=0.25, shuffle=True, random_state=22) ``` The data can be examined - here you can see that 1347 samples have been put into the training set, and 450 have been set aside for testing. ``` # print shape of data print("training samples:", X_train.shape) print("testing samples :", X_test.shape) print("training targets:", y_train.shape) print("testing targets :", y_test.shape) ``` ## Using PCA to visualise data Before diving into classifying, it is useful to visualise the data. Since each sample has 64 dimensions, some dimensionality reduction is needed in order to visualise the samples as points on a 2D map. One of the easiest ways of visualising high dimensional data is by principal component analysis (PCA). This maps the 64 dimensional image data onto a lower dimension map (here we will map to 2D) so it can be easily viewed on a screen. In this case, the 2 most important "components" are maintained. ``` # create PCA model with 2 components pca = decomposition.PCA(n_components=2) ``` The next step is to perform the PCA on the samples, and store the results. ``` # transform training data to 2 principal components X_pca = pca.fit_transform(X_train) # transform test data to 2 principal components T_pca = pca.transform(X_test) # check shape of result print(X_pca.shape) print(T_pca.shape) ``` As you can see from the above cell, the <code>X_pca</code> and <code>T_pca</code> data is now represented by only 2 elements per sample. The number of samples has remained the same. Now that there is a 2D representation of the data, it can be plotted on a regular scatter graph. Since the labels corresponding to each point are stored in the <code>y_train</code> variable, the plot can be colour coded by target value! Different coloured dots have different target values. 
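Before plotting, it is also worth checking how much of the original variation the two retained components actually capture; the fitted PCA object exposes this directly:

```
# fraction of the total variance explained by each of the two principal components
print(pca.explained_variance_ratio_)
print("total variance retained: {:.1%}".format(pca.explained_variance_ratio_.sum()))
```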
``` # choose the colours for each digit cmap_digits = plt.cm.tab10 # plot training data with labels plt.figure(figsize = (9,6)) plt.scatter(X_pca[:,0], X_pca[:,1], s=7, c=y_train, cmap=cmap_digits, alpha=0.7) plt.title("Training data coloured by target value") plt.colorbar(); ``` ## Create and fit the model The scikit-learn library allows fitting of a k-NN model just as with PCA above. First, create the classifier: ``` # create model knn = neighbors.KNeighborsClassifier() ``` The next step fits the k-NN model using the training data. ``` # fit model to training data knn.fit(X_train,y_train); ``` ## Test model Now use the data that was set aside earlier - this stage involves getting the model to "guess" the samples (this time without seeing their target values). Once the model has predicted the sample's class, a score can be calculated by checking how many samples the model guessed correctly. ``` # predict test data preds = knn.predict(X_test) # test model on test data score = round(knn.score(X_test,y_test)*100, 2) print("Score on test data: " + str(score) + "%") ``` 98.44% is a really high score, one that would not likely be seen on real life applications of the method. It can often be useful to visualise the results of your example. Below are plots showing: - The labels that the model predicted for the test data - The actual labels for the test data - The data points that were incorrectly labelled In this case, the predicted and actual plots are very similar, so these plots are not very informative. In other cases, this kind of visualisation may reveal patterns for you to explore further. ``` # plot 3 axes fig, axes = plt.subplots(2,2,figsize=(12,12)) # top left axis for predictions axes[0,0].scatter(T_pca[:,0], T_pca[:,1], s=5, c=preds, cmap=cmap_digits) axes[0,0].set_title("Predicted labels") # top right axis for actual targets axes[0,1].scatter(T_pca[:,0], T_pca[:,1], s=5, c=y_test, cmap=cmap_digits) axes[0,1].set_title("Actual labels") # bottom left axis coloured to show correct and incorrect axes[1,0].scatter(T_pca[:,0], T_pca[:,1], s=5, c=(preds==y_test)) axes[1,0].set_title("Incorrect labels") # bottom right axis not used axes[1,1].set_axis_off() ``` So which samples did the model get wrong? There were 7 samples that were misclassified. These can be displayed alongside their actual and predicted labels using the cell below: ``` # find the misclassified samples misclass = np.where(preds!=y_test)[0] # display misclassified samples r, c = 1, len(misclass) fig, axes = plt.subplots(r,c,figsize=(10,5)) for i in range(c): ax = axes[i] ax.matshow(X_test[misclass[i]].reshape(8,8),cmap=plt.cm.gray) ax.set_axis_off() act = y_test[misclass[i]] pre = preds[misclass[i]] strng = "actual: {a:.0f} \npredicted: {p:.0f}".format(a=act, p=pre) ax.set_title(strng) ``` Additionally, a confusion matrix can be used to identify which samples are misclassified by the model. This can help you identify if their are samples that are commonly misidentified - for example you may identify that 8's are often mistook for 1's. 
``` # confusion matrix conf = metrics.confusion_matrix(y_test,preds) # figure f, ax = plt.subplots(figsize=(9,5)) im = ax.imshow(conf, cmap=plt.cm.RdBu) # set labels as ticks on axes ax.set_xticks(np.arange(10)) ax.set_yticks(np.arange(10)) ax.set_xticklabels(list(range(0,10))) ax.set_yticklabels(list(range(0,10))) ax.set_ylim(9.5,-0.5) # axes labels ax.set_ylabel("actual value") ax.set_xlabel("predicted value") ax.set_title("Digit classification confusion matrix") # display plt.colorbar(im).set_label(label="number of classifications") ```
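A per-class summary of precision, recall and F1 score complements the confusion matrix, and the <code>metrics</code> module imported earlier provides it in one call:

```
# per-class precision, recall and F1 for the k-NN predictions on the test set
print(metrics.classification_report(y_test, preds))
```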
github_jupyter
## Dependencies ``` !nvidia-smi !jupyter notebook list %env CUDA_VISIBLE_DEVICES=3 %matplotlib inline %load_ext autoreload %autoreload 2 import time from pathlib import Path import numpy as np import matplotlib.pyplot as plt import torch import torch.nn as nn import torch.optim as optim import torchvision import torchvision.transforms as transforms from models import tiramisu from models import tiramisu_bilinear from models import tiramisu_m3 from models import unet from datasets import deepglobe from datasets import maroads from datasets import joint_transforms import utils.imgs import utils.training as train_utils # tensorboard from torch.utils.tensorboard import SummaryWriter ``` ## Dataset Download the DeepGlobe dataset from https://competitions.codalab.org/competitions/18467. Place it in datasets/deepglobe/dataset/train,test,valid Download the Massachusetts Road Dataset from https://www.cs.toronto.edu/~vmnih/data/. Combine the training, validation, and test sets, process with `crop_dataset.ipynb` and place the output in datasets/maroads/dataset/map,sat ``` run = "expM.3.drop2.1" DEEPGLOBE_PATH = Path('datasets/', 'deepglobe/dataset') MAROADS_PATH = Path('datasets/', 'maroads/dataset') RESULTS_PATH = Path('.results/') WEIGHTS_PATH = Path('.weights/') RUNS_PATH = Path('.runs/') RESULTS_PATH.mkdir(exist_ok=True) WEIGHTS_PATH.mkdir(exist_ok=True) RUNS_PATH.mkdir(exist_ok=True) batch_size = 1 # TODO: Should be `MAX_BATCH_PER_CARD * torch.cuda.device_count()` (which in this case is 1 assuming max of 1 batch per card) # resize = joint_transforms.JointRandomCrop((300, 300)) normalize = transforms.Normalize(mean=deepglobe.mean, std=deepglobe.std) train_joint_transformer = transforms.Compose([ # resize, joint_transforms.JointRandomHorizontalFlip(), joint_transforms.JointRandomVerticalFlip(), joint_transforms.JointRandomRotate() ]) train_slice = slice(None,4000) test_slice = slice(4000,None) train_dset = deepglobe.DeepGlobe(DEEPGLOBE_PATH, 'train', slc = train_slice, joint_transform=train_joint_transformer, transform=transforms.Compose([ transforms.ColorJitter(brightness=.4,contrast=.4,saturation=.4), transforms.ToTensor(), normalize, ])) train_dset_ma = maroads.MARoads(MAROADS_PATH, joint_transform=train_joint_transformer, transform=transforms.Compose([ transforms.ColorJitter(brightness=.4,contrast=.4,saturation=.4), transforms.ToTensor(), normalize, ])) # print(len(train_dset_ma.imgs)) # print(len(train_dset_ma.msks)) train_dset_combine = torch.utils.data.ConcatDataset((train_dset, train_dset_ma)) # train_loader = torch.utils.data.DataLoader(train_dset, batch_size=batch_size, shuffle=True) # train_loader = torch.utils.data.DataLoader(train_dset_ma, batch_size=batch_size, shuffle=True) train_loader = torch.utils.data.DataLoader( train_dset_combine, batch_size=batch_size, shuffle=True) # resize_joint_transformer = transforms.Compose([ # resize # ]) resize_joint_transformer = None val_dset = deepglobe.DeepGlobe( DEEPGLOBE_PATH, 'valid', joint_transform=resize_joint_transformer, transform=transforms.Compose([ transforms.ToTensor(), normalize ])) val_loader = torch.utils.data.DataLoader( val_dset, batch_size=batch_size, shuffle=False) test_dset = deepglobe.DeepGlobe( DEEPGLOBE_PATH, 'train', joint_transform=resize_joint_transformer, slc = test_slice, transform=transforms.Compose([ transforms.ToTensor(), normalize ])) test_loader = torch.utils.data.DataLoader( test_dset, batch_size=batch_size, shuffle=False) print("Train: %d" %len(train_loader.dataset)) print("Val: %d" %len(val_loader.dataset.imgs)) 
print("Test: %d" %len(test_loader.dataset.imgs)) # print("Classes: %d" % len(train_loader.dataset.classes)) print((iter(train_loader))) inputs, targets = next(iter(train_loader)) print("Inputs: ", inputs.size()) print("Targets: ", targets.size()) # utils.imgs.view_image(inputs[0]) # utils.imgs.view_image(targets[0]) # utils.imgs.view_annotated(targets[0]) # print(targets[0]) for i,(image,label) in enumerate(iter(test_loader)): if i % 10 == 0: print("Procssing image",i) im = image[0] # scale to [0,1] im -= im.min() im /= im.max() im = torchvision.transforms.ToPILImage()(im) im.save("ds_test/" + str(i) + ".png") label = label.float() la = torchvision.transforms.ToPILImage()(label) la.save("ds_test/" + str(i) + ".mask.png") print("Done!") ```
github_jupyter
``` from os import listdir from numpy import array from keras.preprocessing.text import Tokenizer, one_hot from keras.preprocessing.sequence import pad_sequences from keras.models import Model, Sequential, model_from_json from keras.utils import to_categorical from keras.layers.core import Dense, Dropout, Flatten from keras.optimizers import RMSprop from keras.layers.convolutional import Conv2D from keras.callbacks import ModelCheckpoint from keras.layers import Embedding, TimeDistributed, RepeatVector, LSTM, concatenate , Input, Reshape, Dense from keras.preprocessing.image import array_to_img, img_to_array, load_img import numpy as np dir_name = '/data/train/' # Read a file and return a string def load_doc(filename): file = open(filename, 'r') text = file.read() file.close() return text def load_data(data_dir): text = [] images = [] # Load all the files and order them all_filenames = listdir(data_dir) all_filenames.sort() for filename in (all_filenames): if filename[-3:] == "npz": # Load the images already prepared in arrays image = np.load(data_dir+filename) images.append(image['features']) else: # Load the boostrap tokens and rap them in a start and end tag syntax = '<START> ' + load_doc(data_dir+filename) + ' <END>' # Seperate all the words with a single space syntax = ' '.join(syntax.split()) # Add a space after each comma syntax = syntax.replace(',', ' ,') text.append(syntax) images = np.array(images, dtype=float) return images, text train_features, texts = load_data(dir_name) # Initialize the function to create the vocabulary tokenizer = Tokenizer(filters='', split=" ", lower=False) # Create the vocabulary tokenizer.fit_on_texts([load_doc('bootstrap.vocab')]) # Add one spot for the empty word in the vocabulary vocab_size = len(tokenizer.word_index) + 1 # Map the input sentences into the vocabulary indexes train_sequences = tokenizer.texts_to_sequences(texts) # The longest set of boostrap tokens max_sequence = max(len(s) for s in train_sequences) # Specify how many tokens to have in each input sentence max_length = 48 def preprocess_data(sequences, features): X, y, image_data = list(), list(), list() for img_no, seq in enumerate(sequences): for i in range(1, len(seq)): # Add the sentence until the current count(i) and add the current count to the output in_seq, out_seq = seq[:i], seq[i] # Pad all the input token sentences to max_sequence in_seq = pad_sequences([in_seq], maxlen=max_sequence)[0] # Turn the output into one-hot encoding out_seq = to_categorical([out_seq], num_classes=vocab_size)[0] # Add the corresponding image to the boostrap token file image_data.append(features[img_no]) # Cap the input sentence to 48 tokens and add it X.append(in_seq[-48:]) y.append(out_seq) return np.array(X), np.array(y), np.array(image_data) X, y, image_data = preprocess_data(train_sequences, train_features) #Create the encoder image_model = Sequential() image_model.add(Conv2D(16, (3, 3), padding='valid', activation='relu', input_shape=(256, 256, 3,))) image_model.add(Conv2D(16, (3,3), activation='relu', padding='same', strides=2)) image_model.add(Conv2D(32, (3,3), activation='relu', padding='same')) image_model.add(Conv2D(32, (3,3), activation='relu', padding='same', strides=2)) image_model.add(Conv2D(64, (3,3), activation='relu', padding='same')) image_model.add(Conv2D(64, (3,3), activation='relu', padding='same', strides=2)) image_model.add(Conv2D(128, (3,3), activation='relu', padding='same')) image_model.add(Flatten()) image_model.add(Dense(1024, activation='relu')) 
image_model.add(Dropout(0.3)) image_model.add(Dense(1024, activation='relu')) image_model.add(Dropout(0.3)) image_model.add(RepeatVector(max_length)) visual_input = Input(shape=(256, 256, 3,)) encoded_image = image_model(visual_input) language_input = Input(shape=(max_length,)) language_model = Embedding(vocab_size, 50, input_length=max_length, mask_zero=True)(language_input) language_model = LSTM(128, return_sequences=True)(language_model) language_model = LSTM(128, return_sequences=True)(language_model) #Create the decoder decoder = concatenate([encoded_image, language_model]) decoder = LSTM(512, return_sequences=True)(decoder) decoder = LSTM(512, return_sequences=False)(decoder) decoder = Dense(vocab_size, activation='softmax')(decoder) # Compile the model model = Model(inputs=[visual_input, language_input], outputs=decoder) optimizer = RMSprop(lr=0.0001, clipvalue=1.0) model.compile(loss='categorical_crossentropy', optimizer=optimizer) #Save the model for every 2nd epoch filepath="org-weights-epoch-{epoch:04d}--val_loss-{val_loss:.4f}--loss-{loss:.4f}.hdf5" checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_weights_only=True, period=2) callbacks_list = [checkpoint] # Train the model model.fit([image_data, X], y, batch_size=64, shuffle=False, validation_split=0.1, callbacks=callbacks_list, verbose=1, epochs=50) ```
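The notebook above only trains the model. To give a sense of how the trained network would be used, here is a minimal greedy-decoding sketch; it is not part of the original code and assumes that `model`, `tokenizer` and `max_length` from above are available, that weights saved by the checkpoint callback have been loaded, and that `image` is a preprocessed 256x256x3 array.

```
def word_for_id(integer, tokenizer):
    # invert the tokenizer's word index to map a predicted id back to a token
    for word, index in tokenizer.word_index.items():
        if index == integer:
            return word
    return None

def generate_tokens(model, tokenizer, image, max_tokens=150):
    # greedily predict one bootstrap token at a time until <END> is produced
    in_text = '<START> '
    for _ in range(max_tokens):
        sequence = tokenizer.texts_to_sequences([in_text])[0]
        sequence = pad_sequences([sequence], maxlen=max_length)
        yhat = model.predict([np.array([image]), sequence], verbose=0)
        word = word_for_id(np.argmax(yhat), tokenizer)
        if word is None or word == '<END>':
            break
        in_text += word + ' '
    return in_text
```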
github_jupyter
# Linear programming <img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/thumb/0/0c/Linear_Programming_Feasible_Region.svg/2000px-Linear_Programming_Feasible_Region.svg.png" width="400px" height="125px" />

> Linear programming is the field of mathematical optimization devoted to maximizing or minimizing (optimizing) linear functions, called the objective function, in such a way that the variables of that function are subject to a set of constraints expressed as a system of equations or inequalities that are also linear.

**References:**
- https://es.wikipedia.org/wiki/Programaci%C3%B3n_lineal
- https://docs.scipy.org/doc/scipy-0.18.1/reference/optimize.html

## 1. Historical notes <img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/5/5e/JohnvonNeumann-LosAlamos.gif" width="400px" height="125px" />

- 1826: Joseph Fourier anticipates linear programming. Carl Friedrich Gauss solves linear equations by "Gaussian" elimination.
- 1902: Gyula Farkas devises a method for solving systems of inequalities.
- It is not until the Second World War that linear programming is posed as a mathematical model for planning expenditures and returns, so as to reduce war costs and increase the enemy's losses. It was kept secret until 1947 (post-war).
- 1947: George Dantzig publishes the simplex algorithm and John von Neumann develops duality theory. Leonid Kantorovich is known to have formulated the theory independently as well.
- It was used by many industries in day-to-day planning.

**Up to this point, exponential solution times. What follows, polynomial time.**

- 1979: Leonid Khachiyan designs the so-called ellipsoid algorithm, with which he proves that the linear programming problem can be solved efficiently, that is, in polynomial time.
- 1984: Narendra Karmarkar introduces the interior-point method for solving linear programming problems.

**Mention computational complexity.**

## 2. Motivation

In the previous class we mentioned that, when optimizing a function of several variables subject to constraints, the method of Lagrange multipliers can always be applied. However, that method becomes computationally very expensive as the number of variables grows. Therefore, when the function to optimize and the constraints are linear, the solution methods that can be developed are computationally efficient, which is why it is useful to make the distinction.

## 3. Linear programming problems

### 3.1. Basic example

A company produces two products ($X_1$ and $X_2$) using two machines ($A$ and $B$). Each unit of $X_1$ that is produced requires 50 minutes on machine $A$ and 30 minutes on machine $B$. Each unit of $X_2$ that is produced requires 24 minutes on machine $A$ and 33 minutes on machine $B$.

At the beginning of the week there are 30 units of $X_1$ and 90 units of $X_2$ in inventory. The available operating time of machine $A$ is 40 hours and that of machine $B$ is 35 hours.

The demand for $X_1$ in the current week is 75 units and for $X_2$ it is 95 units. The company policy is to maximize the combined sum of units of $X_1$ and $X_2$ in inventory at the end of the week.

Formulate the problem of deciding how much of each product to make during the week as a linear programming problem.
#### Solution

Let:
- $x_1$ be the number of units of $X_1$ to be produced during the week, and
- $x_2$ be the number of units of $X_2$ to be produced during the week.

Note that what we want is to maximize $x_1+x_2$.

Constraints:

1. The available operating time of machine $A$ is 40 hours: $50x_1+24x_2\leq 40(60)\Rightarrow 50x_1+24x_2\leq 2400$.
2. The available operating time of machine $B$ is 35 hours: $30x_1+33x_2\leq 35(60)\Rightarrow 30x_1+33x_2\leq 2100$.
3. The demand for $X_1$ in the current week is 75 units: $x_1+30\geq 75\Rightarrow x_1\geq 45\Rightarrow -x_1\leq -45$.
4. The demand for $X_2$ in the current week is 95 units: $x_2+90\geq 95\Rightarrow x_2\geq 5\Rightarrow -x_2\leq -5$.

Finally, the problem can be expressed in the form explained above as:

\begin{equation}
\begin{array}{ll}
\min_{x_1,x_2} & -x_1-x_2 \\
\text{s.t. } & 50x_1+24x_2\leq 2400 \\
& 30x_1+33x_2\leq 2100 \\
& -x_1\leq -45 \\
& -x_2\leq -5,
\end{array}
\end{equation}

or, equivalently,

\begin{equation}
\begin{array}{ll}
\min_{\boldsymbol{x}} & \boldsymbol{c}^T\boldsymbol{x} \\
\text{s.t. } & \boldsymbol{A}_{eq}\boldsymbol{x}=\boldsymbol{b}_{eq} \\
& \boldsymbol{A}\boldsymbol{x}\leq\boldsymbol{b},
\end{array}
\end{equation}

with
- $\boldsymbol{c}=\left[-1 \quad -1\right]^T$,
- $\boldsymbol{A}=\left[\begin{array}{cc}50 & 24 \\ 30 & 33\\ -1 & 0\\ 0 & -1\end{array}\right]$, and
- $\boldsymbol{b}=\left[2400\quad 2100\quad -45\quad -5\right]^T$.

From here on we will prefer the vector/matrix notation.

### 3.2. In general

According to what was described above, a linear programming problem can be written in the following form:

\begin{equation}
\begin{array}{ll}
\min_{x_1,\dots,x_n} & c_1x_1+\dots+c_nx_n \\
\text{s.t. } & a^{eq}_{j,1}x_1+\dots+a^{eq}_{j,n}x_n=b^{eq}_j \text{ for } 1\leq j\leq m_1 \\
& a_{k,1}x_1+\dots+a_{k,n}x_n\leq b_k \text{ for } 1\leq k\leq m_2,
\end{array}
\end{equation}

where:
- $x_i$ for $i=1,\dots,n$ are the unknowns or decision variables,
- $c_i$ for $i=1,\dots,n$ are the coefficients of the function to optimize,
- $a^{eq}_{j,i}$ for $j=1,\dots,m_1$ and $i=1,\dots,n$ are the coefficients of the equality constraints,
- $a_{k,i}$ for $k=1,\dots,m_2$ and $i=1,\dots,n$ are the coefficients of the inequality constraints,
- $b^{eq}_j$ for $j=1,\dots,m_1$ are known values that must be met exactly, and
- $b_k$ for $k=1,\dots,m_2$ are known values that must not be exceeded.

Equivalently, the problem can be written as

\begin{equation}
\begin{array}{ll}
\min_{\boldsymbol{x}} & \boldsymbol{c}^T\boldsymbol{x} \\
\text{s.t. } & \boldsymbol{A}_{eq}\boldsymbol{x}=\boldsymbol{b}_{eq} \\
& \boldsymbol{A}\boldsymbol{x}\leq\boldsymbol{b},
\end{array}
\end{equation}

where:
- $\boldsymbol{x}=\left[x_1\quad\dots\quad x_n\right]^T$,
- $\boldsymbol{c}=\left[c_1\quad\dots\quad c_n\right]^T$,
- $\boldsymbol{A}_{eq}=\left[\begin{array}{ccc}a^{eq}_{1,1} & \dots & a^{eq}_{1,n}\\ \vdots & \ddots & \vdots\\ a^{eq}_{m_1,1} & \dots & a^{eq}_{m_1,n}\end{array}\right]$,
- $\boldsymbol{A}=\left[\begin{array}{ccc}a_{1,1} & \dots & a_{1,n}\\ \vdots & \ddots & \vdots\\ a_{m_2,1} & \dots & a_{m_2,n}\end{array}\right]$,
- $\boldsymbol{b}_{eq}=\left[b^{eq}_1\quad\dots\quad b^{eq}_{m_1}\right]^T$, and
- $\boldsymbol{b}=\left[b_1\quad\dots\quad b_{m_2}\right]^T$.

**Note:** the problem $\max_{\boldsymbol{x}}\boldsymbol{g}(\boldsymbol{x})$ is equivalent to $\min_{\boldsymbol{x}}-\boldsymbol{g}(\boldsymbol{x})$.
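As a quick numerical sanity check of this formulation, we can evaluate the objective at the candidate vertices that will appear in the graphical solution below (an illustrative sketch):

```
# Evaluate the objective c^T x at the vertices of the feasible region of the basic
# example; for a linear program the optimum is always attained at a vertex.
import numpy as np
c = np.array([-1, -1])
for vertex in [(45, 5), (45, 6.25), (45.6, 5)]:
    print(vertex, "objective value:", c @ np.array(vertex))
```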
#### Good. Once the problem is formulated, how do we solve it?

This problem is simple because it only involves two variables, so the graphical solution is valid.

```
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np

def res1(x1):
    return (2400-50*x1)/24

def res2(x1):
    return (2100-30*x1)/33

x1 = np.linspace(40, 50)
r1 = res1(x1)
r2 = res2(x1)

plt.figure(figsize = (8,6))
plt.plot(x1, res1(x1), 'b--', label = 'res1')
plt.plot(x1, res2(x1), 'r--', label = 'res2')
plt.plot([45, 45], [0, 25], 'k', label = 'res3')
plt.plot([40, 50], [5, 5], 'm', label = 'res4')
plt.fill_between(np.array([45.0, 45.6]), res1(np.array([45.0, 45.6])), 5*np.ones(2))
plt.text(44,4,'$(45,5)$',fontsize=10)
plt.text(45.1,6.35,'$(45,6.25)$',fontsize=10)
plt.text(45.6,4,'$(45.6,5)$',fontsize=10)
plt.legend(loc = 'best')
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.axis([44, 46, 4, 7])
plt.show()
```

**Activity.** Mónica makes earrings and small chains (jewelry). She is so good that everything she makes, she sells. It takes her 30 minutes to make a pair of earrings and one hour to make a chain, and since Mónica is also a broke student, she only has 10 hours per week to make jewelry. On the other hand, the material she buys is only enough to make 15 units of jewelry per week (a pair of earrings counts as one unit). The profit from selling the jewelry is \$15 for each pair of earrings and \$20 for each chain. How many pairs of earrings and how many chains should Mónica make to maximize her profit?

Formulate the problem in the form explained above and obtain the graphical solution (it can be done by hand).

**Ten minutes: whoever does it first will come up to explain it at the board, and I will raise one of their homework grades to 100. They must come up and explain the problem on the whiteboard.**

## 5. How do we solve it in Python?

### 5.1 The `SciPy` library <img style="float: right; margin: 0px 0px 15px 15px;" src="https://scipy.org/_static/images/scipy_med.png" width="200px" height="75px" />

`SciPy` is open-source software, based on `Python`, for mathematics, science and engineering. In particular, these are some of its basic packages:
- `NumPy`
- **The `SciPy` library**
- `SymPy`
- `matplotlib`
- `pandas`

The **`SciPy` library** is one of the core packages and provides several efficient numerical routines, among them routines for numerical integration and optimization. In this class, and for the rest of the module, we will be using the `optimize` module of the `SciPy` library.

**Let's import it**

```
# Import the optimize module from the scipy library
import scipy.optimize as opt
```

The `optimize` module we just imported contains several functions for optimization and root finding ($f(x)=0$). Among them is the function `linprog`

```
# The linprog function from the optimize module
help(opt.linprog)
```

which solves problems like the ones we have just learned to formulate.

### 5.2 Solving the basic example with linprog

We already obtained the graphical solution. Let's contrast it with the solution that `linprog` gives us...

```
# Import numpy to create the matrices
import numpy as np

# Create the matrices to solve the problem
c = np.array([-1, -1])
A = np.array([[50, 24], [30, 33], [-1, 0], [0, -1]])
b = np.array([2400, 2100, -45, -5])
b

# Solve using linprog
resultado = opt.linprog(c, A_ub=A, b_ub=b)

# Show the result
resultado

# Extract the solution vector
resultado.x
```

**Conclusion**
- To maximize the combined inventory of products X1 and X2, 45 units of X1 and 6.25 units of X2 should be produced.
- With that production, the combined inventory at the end of the week is 1.25 units.

**Another way:** specify the variable bounds separately

```
# Write the matrices and bounds
c = np.array([-1, -1])
A = np.array([[50, 24], [30, 33]])
b = np.array([2400, 2100])
x1_bound = (45, None)
x2_bound = (5, None)

# Solve
resultado2 = opt.linprog(c, A_ub=A, b_ub=b, bounds=(x1_bound,x2_bound))

# Show the result
resultado2
```

**Activity.** Solve the example of Mónica and her trinkets with `linprog`

```
# Solve here
c = np.array([-15, -20])
A = np.array([[1, 2], [1, 1]])
b = np.array([20, 15])

resultado_monica = opt.linprog(c, A_ub=A, b_ub=b)
resultado_monica
```

## 6. Transportation problem 1

- **Reference**: https://es.wikipedia.org/wiki/Programaci%C3%B3n_lineal

<img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/a/a0/Progr_Lineal.PNG" width="400px" height="125px" />

This is a curious case, with only 6 variables (a real transportation problem can easily have more than 1,000 variables), in which the usefulness of this computational procedure can be appreciated.

There are three coal mines whose daily production is:
- mine "a" produces 40 tonnes of coal per day;
- mine "b" produces 40 t/day; and,
- mine "c" produces 20 t/day.

In the area there are two thermoelectric power plants that consume:
- plant "d" consumes 40 t/day of coal; and,
- plant "e" consumes 60 t/day.

The market costs of transportation per tonne are:
- from "a" to "d" = 2 coins;
- from "a" to "e" = 11 coins;
- from "b" to "d" = 12 coins;
- from "b" to "e" = 24 coins;
- from "c" to "d" = 13 coins; and,
- from "c" to "e" = 18 coins.

If the local residents were asked how to organize the transportation, most would probably say that the price offered by the carrier going from "a" to "d" should be used as much as possible, because it is more convenient than the others, since it is the lowest price. In that case, the total transportation cost is:
- transporting 40 t from "a" to "d" = 80 coins;
- transporting 20 t from "c" to "e" = 360 coins; and,
- transporting 40 t from "b" to "e" = 960 coins,

for a total of 1,400 coins.

However, formulating the problem to be solved by linear programming with
- $x_1$ tonnes transported from mine "a" to plant "d"
- $x_2$ tonnes transported from mine "a" to plant "e"
- $x_3$ tonnes transported from mine "b" to plant "d"
- $x_4$ tonnes transported from mine "b" to plant "e"
- $x_5$ tonnes transported from mine "c" to plant "d"
- $x_6$ tonnes transported from mine "c" to plant "e"

we obtain the following equations:

Production constraints:
- $x_1 + x_2 \leq 40$
- $x_3 + x_4 \leq 40$
- $x_5 + x_6 \leq 20$

Consumption constraints:
- $x_1 + x_3 + x_5 \geq 40$
- $x_2 + x_4 + x_6 \geq 60$

The objective function will be:

$$\min_{x_1,\dots,x_6}2x_1 + 11x_2 + 12x_3 + 24x_4 + 13x_5 + 18x_6$$

Solve with `linprog`

```
# Matrices and bounds
c = np.array([2, 11, 12, 24, 13, 18])
A = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1], [-1, 0, -1, 0, -1, 0], [0, -1, 0, -1, 0, -1]])
b = np.array([40, 40, 20, -40, -60])

# Solve
resultado_transporte = opt.linprog(c, A_ub=A, b_ub=b)

# Show the result
resultado_transporte
```

**Conclusion**
- The lowest-cost strategy is to ship 40 tonnes from mine "a" to plant "e", 40 tonnes from mine "b" to plant "d", and 20 tonnes from mine "c" to plant "e".
The total cost of this transportation strategy is 1280 coins.

## 7. Optimizing a bond investment

**Reference:**

```
from IPython.display import YouTubeVideo
YouTubeVideo('gukxBus8lOs')
```

The goal of this problem is to determine the best investment strategy, given different types of bonds, the maximum amount that can be invested in each bond, the percentage return, and the years to maturity. There is also a fixed amount of money available ($\$750,000$). At least half of this money must be invested in bonds with 10 or more years to maturity. A maximum of $25\%$ of this amount can be invested in each bond. Finally, there is another constraint that does not allow more than $35\%$ to be placed in high-risk bonds.

There are six (6) investment options, labeled $A_i$:

1. $A_1$: (Return rate=$8.65\%$; Years to maturity=11, Risk=Low)
1. $A_2$: (Return rate=$9.50\%$; Years to maturity=10, Risk=High)
1. $A_3$: (Return rate=$10.00\%$; Years to maturity=6, Risk=High)
1. $A_4$: (Return rate=$8.75\%$; Years to maturity=10, Risk=Low)
1. $A_5$: (Return rate=$9.25\%$; Years to maturity=7, Risk=High)
1. $A_6$: (Return rate=$9.00\%$; Years to maturity=13, Risk=Low)

What we want, then, is to maximize the return generated by the investment. This problem can be solved with linear programming. Formally, it can be described as:

$$\max_{A_1,A_2,...,A_6}\sum^{6}_{i=1} A_iR_i,$$

where $A_i$ represents the amount invested in option $i$, and $R_i$ represents the corresponding return rate.

Set up the constraints...

```
# Matrices and bounds

# Solve

# Show the result
```

Remember that in the problem we minimize $-\sum^{6}_{i=1} A_iR_i$. The return obtained is then:

**Conclusion**
-

## 8. Homework

### 1. Optimal diet design

We want to produce cat food as cheaply as possible, while also making sure the required nutritional analysis figures are met. We therefore want to vary the amount of each ingredient in order to satisfy the nutritional standards. The requirements are that, in 100 grams, there must be at least 8 grams of protein and 6 grams of fat. Likewise, there must be no more than 2 grams of fiber and 0.4 grams of salt.
Recall that in the problem we actually minimize $-\sum^{6}_{i=1} A_iR_i$. The return obtained is then:

**Conclusion**

-

## 8. Homework

### 1. Optimal Diet Design

We want to produce cat food as cheaply as possible, while also making sure that the required nutritional analysis targets are met. We therefore want to vary the amount of each ingredient in order to comply with the nutritional standards.

The requirements are that, per 100 grams, there must be at least 8 grams of protein and 6 grams of fat, and no more than 2 grams of fiber and 0.4 grams of salt.

The nutritional data can be obtained from the following table:

Ingredient|Protein|Fat|Fiber|Salt
:----|----|----|----|----
Chicken|10.0%|08.0%|00.1%|00.2%
Beef|20.0%|10.0%|00.5%|00.5%
Lamb|15.0%|11.0%|00.5%|00.7%
Rice|00.0%|01.0%|10.0%|00.2%
Wheat|04.0%|01.0%|15.0%|00.8%
Gel|00.0%|00.0%|00.0%|00.0%

The cost of each product is:

Ingredient|Cost per gram
:----|----
Chicken|$\$$0.013
Beef|$\$$0.008
Lamb|$\$$0.010
Rice|$\$$0.002
Wheat|$\$$0.005
Gel|$\$$0.001

What we want to optimize here is the amount of each product to use in the cat food. To simplify the notation, we define the following variables:

$x_1:$ grams of chicken
$x_2:$ grams of beef
$x_3:$ grams of lamb
$x_4:$ grams of rice
$x_5:$ grams of wheat
$x_6:$ grams of gel

With these data, the objective function is given by the following expression:

$$\min 0.013 x_1 + 0.008 x_2 + 0.010 x_3 + 0.002 x_4 + 0.005 x_5 + 0.001 x_6$$

The constraints are given by the following set of equations:

$x_1+x_2+x_3+x_4+x_5+x_6=100$

$(10.0 x_1+ 20.0 x_2+ 15.0 x_3+ 00.0 x_4+ 04.0 x_5+ 00.0 x_6)/100 \geq 8.0$

$(08.0 x_1+ 10.0 x_2+ 11.0 x_3+ 01.0 x_4+ 01.0 x_5+ 00.0 x_6)/100 \geq 6.0$

$(00.1 x_1+ 00.5 x_2+ 00.5 x_3+ 10.0 x_4+ 15.0 x_5+ 00.0 x_6)/100 \leq 2.0$

$(00.2 x_1+ 00.5 x_2+ 00.7 x_3+ 00.2 x_4+ 00.8 x_5+ 00.0 x_6)/100 \leq 0.4$

The first condition ensures that the amounts used add up to exactly 100 grams. The remaining ones simply follow the stated guidelines to meet the nutritional requirements.

### 2. Another transportation problem

Reference: https://relopezbriega.github.io/blog/2017/01/18/problemas-de-optimizacion-con-python/

Suppose we have to ship crates of beer from 2 breweries (Modelo and Cuauhtémoc Moctezuma) to 5 bars according to the following diagram:

<img style="float: center; margin: 0px 0px 15px 15px;" src="https://relopezbriega.github.io/images/Trans_problem.png" width="500px" height="150px" />

Also, suppose that our finance manager tells us that the transport cost per crate on each route is given by the following table:

```
import pandas as pd

info = pd.DataFrame({'Bar1': [2, 3],
                     'Bar2': [4, 1],
                     'Bar3': [5, 3],
                     'Bar4': [2, 2],
                     'Bar5': [1, 3]},
                    index=['CerveceriaA', 'CerveceriaB'])
info
```

Finally, the constraints of the problem are given by the supply and demand capacities of each brewery and each bar (in crates of beer), which are detailed in the diagram above.

Let:
- $x_i$ be the number of crates shipped from brewery A to Bar $i$,
- $x_{i+5}$ be the number of crates shipped from brewery B to Bar $i$.

The task is to state the problem of minimizing the transport cost in the way seen above and to solve it with `linprog`.

You must create a Jupyter notebook (an .ipynb file), name it Tarea4_ApellidoNombre, and upload it to Moodle. **Date to be defined**

<footer id="attribution" style="float:right; color:#808080; background:#fff;">
Created with Jupyter by Esteban Jiménez Rodríguez.
</footer>
github_jupyter
<a href="https://www.bigdatauniversity.com"><img src="https://ibm.box.com/shared/static/cw2c7r3o20w9zn8gkecaeyjhgw3xdgbj.png" width=400 align="center"></a> <h1 align="center"><font size="5"> Logistic Regression with Python</font></h1> In this notebook, you will learn Logistic Regression, and then, you'll create a model for a telecommunication company, to predict when its customers will leave for a competitor, so that they can take some action to retain the customers. <h1>Table of contents</h1> <div class="alert alert-block alert-info" style="margin-top: 20px"> <ol> <li><a href="#about_dataset">About the dataset</a></li> <li><a href="#preprocessing">Data pre-processing and selection</a></li> <li><a href="#modeling">Modeling (Logistic Regression with Scikit-learn)</a></li> <li><a href="#evaluation">Evaluation</a></li> <li><a href="#practice">Practice</a></li> </ol> </div> <br> <hr> <a id="ref1"></a> ## What is the difference between Linear and Logistic Regression? While Linear Regression is suited for estimating continuous values (e.g. estimating house price), it is not the best tool for predicting the class of an observed data point. In order to estimate the class of a data point, we need some sort of guidance on what would be the <b>most probable class</b> for that data point. For this, we use <b>Logistic Regression</b>. <div class="alert alert-success alertsuccess" style="margin-top: 20px"> <font size = 3><strong>Recall linear regression:</strong></font> <br> <br> As you know, <b>Linear regression</b> finds a function that relates a continuous dependent variable, <b>y</b>, to some predictors (independent variables $x_1$, $x_2$, etc.). For example, Simple linear regression assumes a function of the form: <br><br> $$ y = \theta_0 + \theta_1 x_1 + \theta_2 x_2 + \cdots $$ <br> and finds the values of parameters $\theta_0, \theta_1, \theta_2$, etc, where the term $\theta_0$ is the "intercept". It can be generally shown as: <br><br> $$ ℎ_\theta(𝑥) = \theta^TX $$ <p></p> </div> Logistic Regression is a variation of Linear Regression, useful when the observed dependent variable, <b>y</b>, is categorical. It produces a formula that predicts the probability of the class label as a function of the independent variables. Logistic regression fits a special s-shaped curve by taking the linear regression and transforming the numeric estimate into a probability with the following function, which is called sigmoid function 𝜎: $$ ℎ_\theta(𝑥) = \sigma({\theta^TX}) = \frac {e^{(\theta_0 + \theta_1 x_1 + \theta_2 x_2 +...)}}{1 + e^{(\theta_0 + \theta_1 x_1 + \theta_2 x_2 +\cdots)}} $$ Or: $$ ProbabilityOfaClass_1 = P(Y=1|X) = \sigma({\theta^TX}) = \frac{e^{\theta^TX}}{1+e^{\theta^TX}} $$ In this equation, ${\theta^TX}$ is the regression result (the sum of the variables weighted by the coefficients), `exp` is the exponential function and $\sigma(\theta^TX)$ is the sigmoid or [logistic function](http://en.wikipedia.org/wiki/Logistic_function), also called logistic curve. It is a common "S" shape (sigmoid curve). So, briefly, Logistic Regression passes the input through the logistic/sigmoid but then treats the result as a probability: <img src="https://ibm.box.com/shared/static/kgv9alcghmjcv97op4d6onkyxevk23b1.png" width="400" align="center"> The objective of __Logistic Regression__ algorithm, is to find the best parameters θ, for $ℎ_\theta(𝑥)$ = $\sigma({\theta^TX})$, in such a way that the model best predicts the class of each case. 
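To make the sigmoid mapping above concrete, here is a minimal sketch (not part of the original lab). The three linear scores are made-up values of $\theta^TX$ used purely for illustration.

```
import numpy as np

def sigmoid(z):
    # logistic function: maps any real-valued score theta^T X to a probability in (0, 1)
    return 1.0 / (1.0 + np.exp(-z))

# made-up linear scores for three hypothetical customers
scores = np.array([-2.0, 0.0, 3.0])
print(sigmoid(scores))  # roughly [0.12, 0.50, 0.95]
```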
### Customer churn with Logistic Regression A telecommunications company is concerned about the number of customers leaving their land-line business for cable competitors. They need to understand who is leaving. Imagine that you are an analyst at this company and you have to find out who is leaving and why. Lets first import required libraries: ``` import pandas as pd import pylab as pl import numpy as np import scipy.optimize as opt from sklearn import preprocessing %matplotlib inline import matplotlib.pyplot as plt ``` <h2 id="about_dataset">About the dataset</h2> We will use a telecommunications dataset for predicting customer churn. This is a historical customer dataset where each row represents one customer. The data is relatively easy to understand, and you may uncover insights you can use immediately. Typically it is less expensive to keep customers than acquire new ones, so the focus of this analysis is to predict the customers who will stay with the company. This data set provides information to help you predict what behavior will help you to retain customers. You can analyze all relevant customer data and develop focused customer retention programs. The dataset includes information about: - Customers who left within the last month – the column is called Churn - Services that each customer has signed up for – phone, multiple lines, internet, online security, online backup, device protection, tech support, and streaming TV and movies - Customer account information – how long they had been a customer, contract, payment method, paperless billing, monthly charges, and total charges - Demographic info about customers – gender, age range, and if they have partners and dependents ### Load the Telco Churn data Telco Churn is a hypothetical data file that concerns a telecommunications company's efforts to reduce turnover in its customer base. Each case corresponds to a separate customer and it records various demographic and service usage information. Before you can work with the data, you must use the URL to get the ChurnData.csv. To download the data, we will use `!wget` to download it from IBM Object Storage. ``` #Click here and press Shift+Enter !wget -O ChurnData.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/ChurnData.csv ``` __Did you know?__ When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC) ### Load Data From CSV File ``` churn_df = pd.read_csv("ChurnData.csv") churn_df.head() ``` <h2 id="preprocessing">Data pre-processing and selection</h2> Lets select some features for the modeling. Also we change the target data type to be integer, as it is a requirement by the skitlearn algorithm: ``` churn_df = churn_df[['tenure', 'age', 'address', 'income', 'ed', 'employ', 'equip', 'callcard', 'wireless','churn']] churn_df['churn'] = churn_df['churn'].astype('int') churn_df.head() ``` ## Practice How many rows and columns are in this dataset in total? What are the name of columns? 
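One possible way to answer both parts of the question, assuming the `churn_df` DataFrame loaded above is in scope, is sketched here:

```
# number of rows and columns
print(churn_df.shape)

# names of the columns
print(list(churn_df.columns))
```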
``` # write your code here churn_df.shape ``` Lets define X, and y for our dataset: ``` X = np.asarray(churn_df[['tenure', 'age', 'address', 'income', 'ed', 'employ', 'equip']]) X[0:5] y = np.asarray(churn_df['churn']) y [0:5] ``` Also, we normalize the dataset: ``` from sklearn import preprocessing X = preprocessing.StandardScaler().fit(X).transform(X) X[0:5] ``` ## Train/Test dataset Okay, we split our dataset into train and test set: ``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=4) print ('Train set:', X_train.shape, y_train.shape) print ('Test set:', X_test.shape, y_test.shape) ``` <h2 id="modeling">Modeling (Logistic Regression with Scikit-learn)</h2> Lets build our model using __LogisticRegression__ from Scikit-learn package. This function implements logistic regression and can use different numerical optimizers to find parameters, including ‘newton-cg’, ‘lbfgs’, ‘liblinear’, ‘sag’, ‘saga’ solvers. You can find extensive information about the pros and cons of these optimizers if you search it in internet. The version of Logistic Regression in Scikit-learn, support regularization. Regularization is a technique used to solve the overfitting problem in machine learning models. __C__ parameter indicates __inverse of regularization strength__ which must be a positive float. Smaller values specify stronger regularization. Now lets fit our model with train set: ``` from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix LR = LogisticRegression(C=0.01, solver='liblinear').fit(X_train,y_train) LR ``` Now we can predict using our test set: ``` yhat = LR.predict(X_test) yhat ``` __predict_proba__ returns estimates for all classes, ordered by the label of classes. So, the first column is the probability of class 1, P(Y=1|X), and second column is probability of class 0, P(Y=0|X): ``` yhat_prob = LR.predict_proba(X_test) yhat_prob ``` <h2 id="evaluation">Evaluation</h2> ### jaccard index Lets try jaccard index for accuracy evaluation. we can define jaccard as the size of the intersection divided by the size of the union of two label sets. If the entire set of predicted labels for a sample strictly match with the true set of labels, then the subset accuracy is 1.0; otherwise it is 0.0. ``` from sklearn.metrics import jaccard_similarity_score jaccard_similarity_score(y_test, yhat) ``` ### confusion matrix Another way of looking at accuracy of classifier is to look at __confusion matrix__. ``` from sklearn.metrics import classification_report, confusion_matrix import itertools def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. 
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') print(confusion_matrix(y_test, yhat, labels=[1,0])) # Compute confusion matrix cnf_matrix = confusion_matrix(y_test, yhat, labels=[1,0]) np.set_printoptions(precision=2) # Plot non-normalized confusion matrix plt.figure() plot_confusion_matrix(cnf_matrix, classes=['churn=1','churn=0'],normalize= False, title='Confusion matrix') ``` Look at first row. The first row is for customers whose actual churn value in test set is 1. As you can calculate, out of 40 customers, the churn value of 15 of them is 1. And out of these 15, the classifier correctly predicted 6 of them as 1, and 9 of them as 0. It means, for 6 customers, the actual churn value were 1 in test set, and classifier also correctly predicted those as 1. However, while the actual label of 9 customers were 1, the classifier predicted those as 0, which is not very good. We can consider it as error of the model for first row. What about the customers with churn value 0? Lets look at the second row. It looks like there were 25 customers whom their churn value were 0. The classifier correctly predicted 24 of them as 0, and one of them wrongly as 1. So, it has done a good job in predicting the customers with churn value 0. A good thing about confusion matrix is that shows the model’s ability to correctly predict or separate the classes. In specific case of binary classifier, such as this example, we can interpret these numbers as the count of true positives, false positives, true negatives, and false negatives. ``` print (classification_report(y_test, yhat)) ``` Based on the count of each section, we can calculate precision and recall of each label: - __Precision__ is a measure of the accuracy provided that a class label has been predicted. It is defined by: precision = TP / (TP + FP) - __Recall__ is true positive rate. It is defined as: Recall =  TP / (TP + FN) So, we can calculate precision and recall of each class. __F1 score:__ Now we are in the position to calculate the F1 scores for each label based on the precision and recall of that label. The F1 score is the harmonic average of the precision and recall, where an F1 score reaches its best value at 1 (perfect precision and recall) and worst at 0. It is a good way to show that a classifer has a good value for both recall and precision. And finally, we can tell the average accuracy for this classifier is the average of the F1-score for both labels, which is 0.72 in our case. ### log loss Now, lets try __log loss__ for evaluation. In logistic regression, the output can be the probability of customer churn is yes (or equals to 1). This probability is a value between 0 and 1. Log loss( Logarithmic loss) measures the performance of a classifier where the predicted output is a probability value between 0 and 1. ``` from sklearn.metrics import log_loss log_loss(y_test, yhat_prob) ``` <h2 id="practice">Practice</h2> Try to build Logistic Regression model again for the same dataset, but this time, use different __solver__ and __regularization__ values? What is new __logLoss__ value? ``` # write your code here ``` Double-click __here__ for the solution. 
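As a hedged sketch of one possible attempt (the solver and `C` value below are arbitrary illustrative choices, not the lab's own answer, which is in the hidden cell that follows), reusing `X_train`, `y_train`, `X_test` and `y_test` from the earlier cells:

```
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss

# a different solver and weaker regularization (larger C) than the first model
LR2 = LogisticRegression(C=0.1, solver='lbfgs').fit(X_train, y_train)
yhat_prob2 = LR2.predict_proba(X_test)
print("LogLoss: %.2f" % log_loss(y_test, yhat_prob2))
```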
<!-- Your answer is below: LR2 = LogisticRegression(C=0.01, solver='sag').fit(X_train,y_train) yhat_prob2 = LR2.predict_proba(X_test) print ("LogLoss: : %.2f" % log_loss(y_test, yhat_prob2)) --> <h2>Want to learn more?</h2> IBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: <a href="http://cocl.us/ML0101EN-SPSSModeler">SPSS Modeler</a> Also, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at <a href="https://cocl.us/ML0101EN_DSX">Watson Studio</a> <h3>Thanks for completing this lesson!</h3> <h4>Author: <a href="https://ca.linkedin.com/in/saeedaghabozorgi">Saeed Aghabozorgi</a></h4> <p><a href="https://ca.linkedin.com/in/saeedaghabozorgi">Saeed Aghabozorgi</a>, PhD is a Data Scientist in IBM with a track record of developing enterprise level applications that substantially increases clients’ ability to turn data into actionable knowledge. He is a researcher in data mining field and expert in developing advanced analytic methods like machine learning and statistical modelling on large datasets.</p> <hr> <p>Copyright &copy; 2018 <a href="https://cocl.us/DX0108EN_CC">Cognitive Class</a>. This notebook and its source code are released under the terms of the <a href="https://bigdatauniversity.com/mit-license/">MIT License</a>.</p>
github_jupyter
``` %matplotlib inline import matplotlib.pylab as plt import numpy as np from keras import objectives from keras import backend as K from keras import losses import tensorflow as tf import interactions_results import train_interactions OBJ_IDS = ['1', '2'] COLUMNS_MAP = [('x', 'ant%s_x'), ('y', 'ant%s_y'), ('major', 'ant%s_major'), ('minor', 'ant%s_minor'), ('angle_deg', 'ant%s_angle_deg'), ('dx', 'ant%s_dx'), ('dy', 'ant%s_dy'), ] COL = dict(COLUMNS_MAP) NAMES = reduce(list.__add__, [[value % i for key, value in COLUMNS_MAP] for i in OBJ_IDS]) COL2ID = {key: i for i, (key, value) in enumerate(COLUMNS_MAP)} COL2ID def angle_absolute_error(y_true, y_pred, backend, scaler=None): if scaler is not None: # y_pred_ = scaler.inverse_transform(y_pred[:, 4:5]) # this doesn't work with Tensors y_pred_ = y_pred[:, 4:5] * scaler[1] + scaler[0] else: y_pred_ = y_pred[:, 4:5] val = backend.abs(y_pred_ - y_true[:, 4:5]) % 180 return backend.minimum(val, 180 - val) def xy_absolute_error(y_true, y_pred, backend): return backend.abs(y_pred[:, :2] - y_true[:, :2]) def absolute_errors(y_true, y_pred, backend, angle_scaler): theta = angle_absolute_error(y_true, y_pred, backend, angle_scaler) pos = xy_absolute_error(y_true, y_pred, backend) return pos, theta def interaction_loss(y_true, y_pred, angle_scaler=None, alpha=0.5): assert 0 <= alpha <= 1 sum_errors_xy, sum_errors_angle, indices = match_pred_to_gt(y_true, y_pred, K, angle_scaler) return K.mean(tf.gather_nd(sum_errors_xy, indices) * (1 - alpha) + tf.gather_nd(sum_errors_angle, indices) * alpha) y_a = np.array([[10., 10, 25, 5, 20, 100, 100, 25, 5, 30], [100., 100, 25, 5, 30, 20, 20, 25, 5, 20], [10., 10, 25, 5, 20, 200, 200, 25, 5, 30]]) y_b = np.array([[20., 20, 25, 5, 30, 150, 170, 25, 5, 0], [30., 30, 25, 5, 30, 170, 150, 25, 5, 5], [30., 60, 25, 5, 30, 170, 120, 25, 5, 5]]) xy, angle, indices = train_interactions.match_pred_to_gt(y_a, y_b, np) print (xy[indices[:, 0], indices[:, 1]]).mean() print (angle[indices[:, 0], indices[:, 1]]).mean() # with h5py.File(DATA_DIR + '/imgs_inter_test.h5', 'r') as hf: # X_test = hf['data'][:] # y_a_ = interactions_results.tostruct(y_a) y_b_ = interactions_results.tostruct(y_b) i = 1 interactions_results.plot_interaction(y_a_[[i]], y_b_[[i]]) plt.ylim(0, 200) plt.xlim(0, 200) y_true = K.variable(y_a) y_pred = K.variable(y_b) backend = K angle_scaler = None K.eval(y_pred[:, 1:2]) K.eval(y_pred[:, [COL2ID['x'], COL2ID['y']]] - y_true[:, [COL2ID['x'], COL2ID['y']]]) y_true = y_a y_pred = y_b backend = np angle_scaler = None mean_errors_xy, mean_errors_angle, indices = train_interactions.match_pred_to_gt(y_true, y_pred, backend) for x in [mean_errors_xy, mean_errors_angle, indices]: print x mean_errors_xy, mean_errors_angle, indices = train_interactions.match_pred_to_gt(y_true, y_pred, backend) for x in [mean_errors_xy, mean_errors_angle, indices]: print K.eval(x) mean_errors_xy, mean_errors_angle, indices = train_interactions.match_pred_to_gt(y_true, y_pred, K) for x in [mean_errors_xy, mean_errors_angle, indices]: print K.eval(x) # def match_pred_to_gt(y_true, y_pred, backend, angle_scaler=None): """ Return mean absolute errors for individual samples for xy and theta in two possible combinations of prediction and ground truth. 
""" xy11, theta11 = absolute_errors(y_true[:, :5], y_pred[:, :5], backend, angle_scaler) xy22, theta22 = absolute_errors(y_true[:, 5:], y_pred[:, 5:], backend, angle_scaler) xy12, theta12 = absolute_errors(y_true[:, :5], y_pred[:, 5:], backend, angle_scaler) xy21, theta21 = absolute_errors(y_true[:, 5:], y_pred[:, :5], backend, angle_scaler) if backend == np: norm = np.linalg.norm int64 = np.int64 shape = lambda x, n: x.shape[n] else: norm = tf.linalg.norm int64 = tf.int64 shape = lambda x, n: backend.cast(backend.shape(x)[n], int64) mean_errors_xy = backend.stack((backend.mean(backend.stack((norm(xy11, axis=1), norm(xy22, axis=1))), axis=0), backend.mean(backend.stack((norm(xy12, axis=1), norm(xy21, axis=1))), axis=0))) # shape=(2, n) mean_errors_angle = backend.stack((backend.mean(backend.concatenate((theta11, theta22)), axis=1), backend.mean(backend.concatenate((theta12, theta21)), axis=1))) # shape=(2, n) print K.eval(theta11) print K.eval(backend.concatenate((theta11, theta22))) print K.eval(backend.sum(backend.concatenate((theta11, theta22)), axis=1)) swap_idx = backend.argmin(mean_errors_xy, axis=0) # shape = (n,) indices = backend.transpose( backend.stack((swap_idx, backend.arange(0, shape(mean_errors_xy, 1))))) # shape=(n, 2) # return mean_errors_xy, mean_errors_angle, indices for x in [mean_errors_xy, mean_errors_angle, indices]: print K.eval(x) angle_scaler = None y_true = K.variable(y_a) y_pred = K.variable(y_b) xy11, theta11 = absolute_errors(y_true[:, :5], y_pred[:, :5], angle_scaler) xy22, theta22 = absolute_errors(y_true[:, 5:], y_pred[:, 5:], angle_scaler) xy12, theta12 = absolute_errors(y_true[:, :5], y_pred[:, 5:], angle_scaler) xy21, theta21 = absolute_errors(y_true[:, 5:], y_pred[:, :5], angle_scaler) norm = tf.linalg.norm # print y_a # print y_b # print K.eval(xy11) # print K.eval(xy22) # print K.eval(xy12) # print K.eval(xy21) sum_errors_xy = K.stack((K.sum(K.stack((norm(xy11, axis=1), norm(xy22, axis=1))), axis=0), K.sum(K.stack((norm(xy12, axis=1), norm(xy21, axis=1))), axis=0))) # shape=(2, n) sum_errors_angle = K.stack((K.sum(K.concatenate((theta11, theta22)), axis=1), K.sum(K.concatenate((theta12, theta21)), axis=1))) # shape=(2, n) swap_idx = K.argmin(sum_errors_xy, axis=0) # shape = (n,) indices = K.transpose(K.stack((swap_idx, K.arange(0, K.cast(K.shape(sum_errors_xy)[1], tf.int64))))) # shape=(n, 2) print K.eval(tf.gather_nd(sum_errors_xy, idx)) print K.eval(tf.gather_nd(sum_errors_angle, idx)) print K.eval(tf.gather_nd(sum_errors_xy, idx) + tf.gather_nd(sum_errors_angle, idx)) idx = K.transpose(K.stack((swap_idx, K.arange(0, K.cast(K.shape(sum_errors_xy)[1], tf.int64))))) K.eval(tf.gather_nd(sum_errors_xy, idx)) K.eval(sum_errors_xy) K.eval(sum_errors_angle) K.eval(tf.gather_nd(sum_errors_angle, idx)) np.ca ```
github_jupyter
> Developed by [Yeison Nolberto Cardona Álvarez](https://github.com/yeisonCardona) > [Andrés Marino Álvarez Meza, PhD.](https://github.com/amalvarezme) > César Germán Castellanos Dominguez, PhD. > _Digital Signal Processing and Control Group_ | _Grupo de Control y Procesamiento Digital de Señales ([GCPDS](https://github.com/UN-GCPDS/))_ > _National University of Colombia at Manizales_ | _Universidad Nacional de Colombia sede Manizales_ ---- # OpenBCI-Stream High level Python module for EEG/EMG/ECG acquisition and distributed streaming for OpenBCI Cyton board. ![GitHub top language](https://img.shields.io/github/languages/top/un-gcpds/openbci-stream?) ![PyPI - License](https://img.shields.io/pypi/l/openbci-stream?) ![PyPI](https://img.shields.io/pypi/v/openbci-stream?) ![PyPI - Status](https://img.shields.io/pypi/status/openbci-stream?) ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/openbci-stream?) ![GitHub last commit](https://img.shields.io/github/last-commit/un-gcpds/openbci-stream?) ![CodeFactor Grade](https://img.shields.io/codefactor/grade/github/UN-GCPDS/openbci-stream?) [![Documentation Status](https://readthedocs.org/projects/openbci-stream/badge/?version=latest)](https://openbci-stream.readthedocs.io/en/latest/?badge=latest) Comprise a set of scripts that deals with the configuration and connection with the board, also is compatible with both connection modes supported by [Cyton](https://shop.openbci.com/products/cyton-biosensing-board-8-channel?variant=38958638542): RFduino (Serial dongle) and Wi-Fi (with the OpenBCI Wi-Fi Shield). These drivers are a stand-alone library that can handle the board from three different endpoints: (i) a [Command-Line Interface](06-command_line_interface.ipynb) (CLI) with simple instructions configure, start and stop data acquisition, debug stream status, and register events markers; (ii) a [Python Module](03-data_acuisition.ipynb) with high-level instructions and asynchronous acquisition; (iii) an object-proxying using Remote Python Call (RPyC) for [distributed implementations](A4-server-based-acquisition.ipynb) that can manipulate the Python modules as if they were local, this last mode needs a daemon running in the remote host that will listen to connections and driving instructions. The main functionality of the drivers live on to serve real-time and distributed access to data flow, even on single machine implementations, this is achieved by implementing [Kafka](https://kafka.apache.org/) and their capabilities to create multiple topics for classifying the streaming, these topics are used to separate the neurophysiological data from the [event markers](05-stream_markers), so the clients can subscribe to a specific topic for injecting or read content, this means that is possible to implement an event register in a separate process that stream markers for all clients in real-time without handle dense time-series data. A crucial issue that stays on [time synchronization](A4-server-based_acquisition.ipynb#Step-5---Configure-time-server), all systems components in the network should have the same real-time protocol (RTP) server reference. ## Main features * **Asynchronous acquisition:** Acquisition and deserialization are done in uninterrupted parallel processes. In this way, the sampling rate keeps stable as long as possible. * **Distributed streaming system:** The acquisition, processing, visualizations, and any other system that needs to be fed with EEG/EMG/ECG real-time data can run with their architecture. 
* **Remote board handle:** The same code syntax for developing and debugging Cyton boards connected to any node in the distributed system.
* **Command-line interface:** A simple interface for handling the start, the stop, and the access to the data stream directly from the command line.
* **Markers/Events handler:** Besides the marker boardmode available in Cyton, a stream channel for reading and writing markers is available for use in any development.
* **Multiple boards:** It is possible to use multiple OpenBCI boards just by adding multiple endpoints to the commands.

## Examples

```
# Acquisition with blocking call

from openbci_stream.acquisition import Cyton
openbci = Cyton('serial', endpoint='/dev/ttyUSB0', capture_stream=True)

# blocking call
openbci.stream(15)  # collect data for 15 seconds

# openbci.eeg_time_series
# openbci.aux_time_series
# openbci.timestamp_time_series


# Acquisition with asynchronous call

import time  # needed for the sleep/timing calls below

from openbci_stream.acquisition import Cyton
openbci = Cyton('wifi', endpoint='192.68.1.113', capture_stream=True)
openbci.stream(15)  # collect data for 15 seconds

# asynchronous call
openbci.start_stream()
time.sleep(15)  # collect data for 15 seconds
openbci.stop_stream()


# Remote acquisition

from openbci_stream.acquisition import Cyton
openbci = Cyton('serial', endpoint='/dev/ttyUSB0', host='192.168.1.1', capture_stream=True)

# blocking call
openbci.stream(15)  # collect data for 15 seconds


# Consumer for active streaming

from openbci_stream.acquisition import OpenBCIConsumer

with OpenBCIConsumer() as stream:
    for i, message in enumerate(stream):
        if message.topic == 'eeg':
            print(f"received {message.value['samples']} samples")
            if i == 9:
                break


# Create stream then consume data

from openbci_stream.acquisition import OpenBCIConsumer

with OpenBCIConsumer(mode='serial', endpoint='/dev/ttyUSB0',
                     streaming_package_size=250) as (stream, openbci):
    t0 = time.time()
    for i, message in enumerate(stream):
        if message.topic == 'eeg':
            print(f"{i}: received {message.value['samples']} samples")
            t0 = time.time()
            if i == 9:
                break


# Acquisition with multiple boards

from openbci_stream.acquisition import Cyton
openbci = Cyton('wifi', endpoint=['192.68.1.113', '192.68.1.185'], capture_stream=True)
openbci.stream(15)  # collect data for 15 seconds

# asynchronous call
openbci.start_stream()
time.sleep(15)  # collect data for 15 seconds
openbci.stop_stream()
```
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # tf.function で性能アップ <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/customization/performance"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/ja/tutorials/customization/performance.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/ja/tutorials/customization/performance.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/ja/tutorials/customization/performance.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> TensorFlow 2.0 では Eager Execution が既定で有効になっています。ユーザーインターフェイスは直感的で柔軟です(演算を一度だけ行う場合にはずっと簡単に、かつ迅速に実行されます)。しかしながら、それは性能と展開の面での犠牲の上に成り立っています。 最高性能を得ながら、モデルをどこへでも展開できるようにするには、`tf.function` を使ってプログラムから計算グラフを作成します。 AutoGraph のおかげで、驚くほど多くの Python コードが tf.function でそのまま動作しますが、気をつけなければならない落とし穴も存在します。 ポイントと推奨事項は下記の通りです。 - オブジェクトの変更やリストへの追加のような Python の副作用に依存しないこと - tf.functions は NumPy の演算や Python の組み込み演算よりも、TensorFlow の演算に適していること - 迷ったときは、`for x in y` というイディオムを使うこと ``` from __future__ import absolute_import, division, print_function, unicode_literals try: %tensorflow_version 2.x except Exception: pass import tensorflow as tf import contextlib # 遭遇するかもしれないいくつかのエラーをデモするためのヘルパー関数 @contextlib.contextmanager def assert_raises(error_class): try: yield except error_class as e: print('Caught expected exception \n {}: {}'.format(error_class, e)) except Exception as e: print('Got unexpected exception \n {}: {}'.format(type(e), e)) else: raise Exception('Expected {} to be raised but no error was raised!'.format( error_class)) ``` あなたが定義した `tf.function` は TensorFlow Core の演算に似たものです。例えばそれを即時に実行することも、計算グラフで使うこともできますし、勾配を計算することも可能です。 ``` # function は演算のように振る舞う @tf.function def add(a, b): return a + b add(tf.ones([2, 2]), tf.ones([2, 2])) # [[2., 2.], [2., 2.]] # function は勾配を計算できる @tf.function def add(a, b): return a + b v = tf.Variable(1.0) with tf.GradientTape() as tape: result = add(v, 1.0) tape.gradient(result, v) # function 内で function を使うこともできる @tf.function def dense_layer(x, w, b): return add(tf.matmul(x, w), b) dense_layer(tf.ones([3, 2]), tf.ones([2, 2]), tf.ones([2])) ``` ## トレーシングとポリモーフィズム Python の動的型付けは、関数をさまざまな型の引数で呼び出すことができ、Python がそれぞれのシナリオで異なる動作をするということを意味します。 他方で、TensorFlow の計算グラフでは、dtype と shape の次元が静的であることが必要です。`tf.function` は、正しい計算グラフを生成するために必要なときには関数を再トレースして、このギャップをつなぐ役割を果たします。 異なる型の引数を使って関数を呼び出し、何が起きるか見てみましょう。 ``` # Function はポリモーフィック @tf.function def double(a): print("Tracing with", a) return a + a print(double(tf.constant(1))) print() 
print(double(tf.constant(1.1))) print() print(double(tf.constant("a"))) print() ``` トレースの動作を制御するためには、下記のようなテクニックを使います。 - 新しい `tf.function` を作成する。別々の `tf.function` オブジェクトがトレースを共有することはない。 - 特定のトレースを得るには `get_concrete_function` メソッドを使用する。 - 計算グラフの呼び出し時に1回だけトレースを行うには、 `input_signature` を指定して `tf.function` を呼び出す。 ``` print("Obtaining concrete trace") double_strings = double.get_concrete_function(tf.TensorSpec(shape=None, dtype=tf.string)) print("Executing traced function") print(double_strings(tf.constant("a"))) print(double_strings(a=tf.constant("b"))) print("Using a concrete trace with incompatible types will throw an error") with assert_raises(tf.errors.InvalidArgumentError): double_strings(tf.constant(1)) @tf.function(input_signature=(tf.TensorSpec(shape=[None], dtype=tf.int32),)) def next_collatz(x): print("Tracing with", x) return tf.where(tf.equal(x % 2, 0), x // 2, 3 * x + 1) print(next_collatz(tf.constant([1, 2]))) # 1次元のテンソルを input signature として指定しているので、これは失敗する with assert_raises(ValueError): next_collatz(tf.constant([[1, 2], [3, 4]])) ``` ## いつ再トレースするのか? ポリモーフィックな `tf.function` はトレーシングによって生成された具象関数のキャッシュを保持しています。キャッシュのキーは、実際にはその関数の引数及びキーワード引数から生成されたキーのタプルです。`tf.Tensor` 引数から生成されるキーは、テンソルの shape と型です。Python の組み込み型引数から生成されるキーはその値です。それ以外の Python の型では、キーはオブジェクトの `id()` に基づいており、メソッドはクラスのインスタンスひとつずつ独立にトレースされます。将来、TensorFlowには、Python オブジェクトについて安全にテンソルに変換できるような、より洗練されたキャッシングが追加されるかもしれません。 ## 引数は Python か? Tensor か? しばしば、ハイパーパラメータやグラフ構成を制御するために Python の組み込み型の引数が使われます。例えば、`num_layers=10` や `training=True` あるいは `nonlinearity='relu'` のようにです。このため、この Python の組み込み型の引数が変更されると、計算グラフを再びトレースする必要があるということになります。 しかし、グラフの生成を制御するために Python の組み込み型の引数を使用する必要はありません。これらのケースでは、Python引数の値の変更が不必要な再トレースを引き起こす可能性があります。例えば、この訓練ループでは、AutoGraph は動的に展開を行います。複数回トレースを行っていますが、生成される計算グラフは全く変わりません。これは少し非効率です。 ``` def train_one_step(): pass @tf.function def train(num_steps): print("Tracing with num_steps = {}".format(num_steps)) for _ in tf.range(num_steps): train_one_step() train(num_steps=10) train(num_steps=20) ``` ここでの簡単な回避方法は、生成されたグラフの shape が変わらないのであれば、引数をテンソルにキャストすることです。 ``` train(num_steps=tf.constant(10)) train(num_steps=tf.constant(20)) ``` ## `tf.function` の中の副作用 一般的には、(印字やオブジェクト変更のような)Python の副作用は、トレーシングの最中にだけ発生します。それでは、どうしたら `tf.function` で安定的に副作用を起こすことができるでしょうか? 
一般的な原則は、トレースをデバッグする際にだけ Python の副作用を使用するというものです。あるいは、`tf.Variable.assign`、`tf.print`、そして `tf.summary` のような TensorFlow の演算を使うことで、コードがトレースされるときにも、TensorFlowランタイムによって都度呼び出される際にも、確実に実行されるようにできます。一般には、関数型のスタイルを使用することで最も良い結果を得られます。 ``` @tf.function def f(x): print("Traced with", x) tf.print("Executed with", x) f(1) f(1) f(2) ``` `tf.function` が呼び出されるたびに Python のコードを実行したいのであれば、`tf.py_function` がぴったりです。`tf.py_function` の欠点は、ポータブルでないこと、それほど性能が高くないこと、(マルチGPU、TPUの)分散環境ではうまく動作しないことなどです。また、`tf.py_function` は計算グラフに組み込まれるため、入出力すべてをテンソルにキャストします。 ``` external_list = [] def side_effect(x): print('Python side effect') external_list.append(x) @tf.function def f(x): tf.py_function(side_effect, inp=[x], Tout=[]) f(1) f(1) f(1) assert len(external_list) == 3 # .numpy() call required because py_function casts 1 to tf.constant(1) assert external_list[0].numpy() == 1 ``` ## Python の状態に注意 ジェネレーターやイテレーターなど Python の機能の多くは、状態を追跡するために Python のランタイムに依存しています。これらの仕組みは、一般的には Eager モードでも期待通りに動作しますが、トレーシングの振る舞いにより、`tf.function` の中では予期しないことが起きることがあります。 1例として、イテレーターの状態が進むのは Python の副作用であり、トレーシングの中だけで発生します。 ``` external_var = tf.Variable(0) @tf.function def buggy_consume_next(iterator): external_var.assign_add(next(iterator)) tf.print("Value of external_var:", external_var) iterator = iter([0, 1, 2, 3]) buggy_consume_next(iterator) # 次のコードは、イテレーターの次の値を使うのではなく、最初の値を再利用する buggy_consume_next(iterator) buggy_consume_next(iterator) ``` イテレーターが tf.function の中で生成されすべて使われる場合には、正しく動作するはずです。しかし、イテレーター全体がトレースされることとなり、巨大な計算グラフの生成をまねく可能性があります。これは、望みどおりの動作かもしれません。しかし、もし Python のリストとして表されたメモリー上の巨大なデータセットを使って訓練を行うとすると、これは非常に大きな計算グラフを生成することになり、`tf.function` がスピードアップにはつながらないと考えられます。 Python データを繰り返し使用する場合、もっとも安全な方法は tf.data.Dataset でラップして、`for x in y` というイディオムを使用することです。AutoGraph には、`y` がテンソルあるいは tf.data.Dataset である場合、`for` ループを安全に変換する特別な機能があります。 ``` def measure_graph_size(f, *args): g = f.get_concrete_function(*args).graph print("{}({}) contains {} nodes in its graph".format( f.__name__, ', '.join(map(str, args)), len(g.as_graph_def().node))) @tf.function def train(dataset): loss = tf.constant(0) for x, y in dataset: loss += tf.abs(y - x) # ダミー計算 return loss small_data = [(1, 1)] * 2 big_data = [(1, 1)] * 10 measure_graph_size(train, small_data) measure_graph_size(train, big_data) measure_graph_size(train, tf.data.Dataset.from_generator( lambda: small_data, (tf.int32, tf.int32))) measure_graph_size(train, tf.data.Dataset.from_generator( lambda: big_data, (tf.int32, tf.int32))) ``` Python/Numpy のデータを Dataset でラップする際には、`tf.data.Dataset.from_generator` と `tf.data.Dataset.from_tensors` の違いに留意しましょう。前者はデータを Python のまま保持し `tf.py_function` を通じて取得するため、性能に影響する場合があります。これに対して後者はデータのコピーを計算グラフの中の、ひとつの大きな `tf.constant()` に結びつけるため、メモリー消費に影響する可能性があります。 TFRecordDataset/CsvDataset/などを通じてデータをファイルから読み込むことが、データを使用する最も効率的な方法です。TensorFlow 自身が Python とは関係なく非同期のデータ読み込みとプリフェッチを管理することができるからです。 ## 自動的な依存関係の制御 プログラミングモデルとしての関数が一般的なデータフローグラフに対して非常に優位である点は、意図したコードの振る舞いがどのようなものであるかということについて、より多くの情報をランタイムに与えられるということにあります。 例えば、同じ変数を何度も読んだり書いたりするコードを書く場合、データフローグラフではもともと意図されていた演算の順番を自然に組み込むわけではありません。`tf.function` の中では、もともとの Python コードの文の実行順序を参照することで、実行順序の曖昧さを解消します。これにより、`tf.function` の中のステートフルな演算の順序が、先行実行モードのセマンティクスを模していることになります。 これは、手動で制御の依存関係を加える必要がないことを意味しています。`tf.function` は十分賢いので、あなたのコードが正しく動作するために必要十分な最小限の制御の依存関係を追加してくれます。 ``` # 自動的な依存関係の制御 a = tf.Variable(1.0) b = tf.Variable(2.0) @tf.function def f(x, y): a.assign(y * b) b.assign_add(x * a) return a + b f(1.0, 2.0) # 10.0 ``` ## 変数 `tf.function` 
の中では、意図したコードの実行順序を活用するという同じアイデアを使って、変数の作成と活用を簡単に行うことができます。しかし、ひとつだけ非常に重要な欠点があります。それは、変数を使った場合、先行実行モードとグラフモードでは動作が変わるコードを書いてしまう可能性があるということです。 特に、呼び出しの都度新しい変数を作成する場合にこれが発生します。トレーシングの意味では、`tf.function` は呼び出しのたびに同じ変数を再利用しますが、Eager モードでは呼び出しごとに新しい変数を生成します。この間違いを防止するため、`tf.function` は危険な変数の生成動作を見つけるとエラーを発生させます。 ``` @tf.function def f(x): v = tf.Variable(1.0) v.assign_add(x) return v with assert_raises(ValueError): f(1.0) # しかし、曖昧さの無いコードは大丈夫 v = tf.Variable(1.0) @tf.function def f(x): return v.assign_add(x) print(f(1.0)) # 2.0 print(f(2.0)) # 4.0 # 初めて関数が実行されるときだけ変数が生成されることを保証できれば # tf.function 内で変数を作成できる class C: pass obj = C(); obj.v = None @tf.function def g(x): if obj.v is None: obj.v = tf.Variable(1.0) return obj.v.assign_add(x) print(g(1.0)) # 2.0 print(g(2.0)) # 4.0 # 変数の初期化は、関数の引数や他の変数の値に依存可能 # 制御の依存関係を生成するのと同じ手法で、正しい初期化の順序を発見可能 state = [] @tf.function def fn(x): if not state: state.append(tf.Variable(2.0 * x)) state.append(tf.Variable(state[0] * 3.0)) return state[0] * x * state[1] print(fn(tf.constant(1.0))) print(fn(tf.constant(3.0))) ``` # AutoGraph の使用 [autograph](https://www.tensorflow.org/guide/function) ライブラリは `tf.function` に完全に統合されており、計算グラフの中で動的に実行される条件文や繰り返しを書くことができます。 `tf.cond` や `tf.while_loop` は `tf.function` でも使えますが、制御フローを含むコードは、命令形式で書いたほうが書きやすいし理解しやすいです。 ``` # 単純な繰り返し @tf.function def f(x): while tf.reduce_sum(x) > 1: tf.print(x) x = tf.tanh(x) return x f(tf.random.uniform([5])) # 興味があれば AutoGraph が生成するコードを調べることができる # ただし、アセンブリ言語を読むような感じがする def f(x): while tf.reduce_sum(x) > 1: tf.print(x) x = tf.tanh(x) return x print(tf.autograph.to_code(f)) ``` ## AutoGraph: 条件分岐 AutoGraph は `if` 文を等価である `tf.cond` の呼び出しに変換します。 この置換は条件がテンソルである場合に行われます。そうでない場合には、条件分岐はトレーシングの中で実行されます。 ``` def test_tf_cond(f, *args): g = f.get_concrete_function(*args).graph if any(node.name == 'cond' for node in g.as_graph_def().node): print("{}({}) uses tf.cond.".format( f.__name__, ', '.join(map(str, args)))) else: print("{}({}) executes normally.".format( f.__name__, ', '.join(map(str, args)))) @tf.function def hyperparam_cond(x, training=True): if training: x = tf.nn.dropout(x, rate=0.5) return x @tf.function def maybe_tensor_cond(x): if x < 0: x = -x return x test_tf_cond(hyperparam_cond, tf.ones([1], dtype=tf.float32)) test_tf_cond(maybe_tensor_cond, tf.constant(-1)) test_tf_cond(maybe_tensor_cond, -1) ``` `tf.cond` には、色々と注意すべき細かな点があります。 - `tf.cond` は条件分岐の両方をトレーシングし、条件に従って実行時に適切な分岐を選択することで機能します。分岐の両方をトレースすることで、Python プログラムを予期せず実行する可能性があります。 - `tf.cond` では、分岐の一方が後ほど使用されるテンソルを作成する場合、もう一方の分岐もそのテンソルを作成することが必要です。 ``` @tf.function def f(): x = tf.constant(0) if tf.constant(True): x = x + 1 print("Tracing `then` branch") else: x = x - 1 print("Tracing `else` branch") return x f() @tf.function def f(): if tf.constant(True): x = tf.ones([3, 3]) return x # 分岐のどちらの枝でも `x` を定義する必要があるためエラーが発生 with assert_raises(ValueError): f() ``` ## AutoGraph と繰り返し AutoGraph には繰り返しの変換にいくつかの単純なルールがあります。 - `for`: 反復可能オブジェクトがテンソルである場合に変換する - `while`: while 条件がテンソルに依存している場合に変換する 繰り返しが変換される場合、`tf.while_loop` によって動的に展開されます。あるいは、 `for x in tf.data.Dataset` という特別なケースの場合には、 `tf.data.Dataset.reduce` に変換されます。 繰り返しが変換されない場合、それは静的に展開されます。 ``` def test_dynamically_unrolled(f, *args): g = f.get_concrete_function(*args).graph if any(node.name == 'while' for node in g.as_graph_def().node): print("{}({}) uses tf.while_loop.".format( f.__name__, ', '.join(map(str, args)))) elif any(node.name == 'ReduceDataset' for node in g.as_graph_def().node): print("{}({}) uses tf.data.Dataset.reduce.".format( f.__name__, ', '.join(map(str, args)))) else: 
print("{}({}) gets unrolled.".format( f.__name__, ', '.join(map(str, args)))) @tf.function def for_in_range(): x = 0 for i in range(5): x += i return x test_dynamically_unrolled(for_in_range) @tf.function def for_in_tfrange(): x = tf.constant(0, dtype=tf.int32) for i in tf.range(5): x += i return x test_dynamically_unrolled(for_in_tfrange) @tf.function def for_in_tfdataset(): x = tf.constant(0, dtype=tf.int64) for i in tf.data.Dataset.range(5): x += i return x test_dynamically_unrolled(for_in_tfdataset) @tf.function def while_py_cond(): x = 5 while x > 0: x -= 1 return x test_dynamically_unrolled(while_py_cond) @tf.function def while_tf_cond(): x = tf.constant(5) while x > 0: x -= 1 return x test_dynamically_unrolled(while_tf_cond) ``` 繰り返しに、テンソルに依存する `break` や、途中での `return` がある場合、一番外側の条件あるいは反復可能オブジェクトはテンソルである必要があります。 比較してみましょう。 ``` @tf.function def while_py_true_py_break(x): while True: # py true if x == 0: # py break break x -= 1 return x test_dynamically_unrolled(while_py_true_py_break, 5) @tf.function def buggy_while_py_true_tf_break(x): while True: # py true if tf.equal(x, 0): # tf break break x -= 1 return x with assert_raises(TypeError): test_dynamically_unrolled(buggy_while_py_true_tf_break, 5) @tf.function def while_tf_true_tf_break(x): while tf.constant(True): # tf true if x == 0: # py break break x -= 1 return x test_dynamically_unrolled(while_tf_true_tf_break, 5) @tf.function def buggy_py_for_tf_break(): x = 0 for i in range(5): # py for if tf.equal(i, 3): # tf break break x += i return x with assert_raises(TypeError): test_dynamically_unrolled(buggy_py_for_tf_break) @tf.function def tf_for_py_break(): x = 0 for i in tf.range(5): # tf for if i == 3: # py break break x += i return x test_dynamically_unrolled(tf_for_py_break) ``` 動的に展開される繰り返しの結果を集計するため、`tf.TensorArray` を使いたくなるかもしれません。 ``` batch_size = 2 seq_len = 3 feature_size = 4 def rnn_step(inp, state): return inp + state @tf.function def dynamic_rnn(rnn_step, input_data, initial_state): # [batch, time, features] -> [time, batch, features] input_data = tf.transpose(input_data, [1, 0, 2]) max_seq_len = input_data.shape[0] states = tf.TensorArray(tf.float32, size=max_seq_len) state = initial_state for i in tf.range(max_seq_len): state = rnn_step(input_data[i], state) states = states.write(i, state) return tf.transpose(states.stack(), [1, 0, 2]) dynamic_rnn(rnn_step, tf.random.uniform([batch_size, seq_len, feature_size]), tf.zeros([batch_size, feature_size])) ``` `tf.cond` と同様に、`tf.while_loop` にも、色々と注意すべき細かな点があります。 - 繰り返しの実行回数が 0 である可能性があるため、while_loop の後で使用されるテンソルは、繰り返しの前に初期化されなければならない - すべての繰り返しの変数は、各繰り返しを通じてその形状と dtype が変わらないことが必要 ``` @tf.function def buggy_loop_var_uninitialized(): for i in tf.range(3): x = i return x with assert_raises(ValueError): buggy_loop_var_uninitialized() @tf.function def f(): x = tf.constant(0) for i in tf.range(3): x = i return x f() @tf.function def buggy_loop_type_changes(): x = tf.constant(0, dtype=tf.float32) for i in tf.range(3): # tf.int32 型のテンソルを1つづつ取り出して… x = i return x with assert_raises(tf.errors.InvalidArgumentError): buggy_loop_type_changes() @tf.function def buggy_concat(): x = tf.ones([0, 10]) for i in tf.range(5): x = tf.concat([x, tf.ones([1, 10])], axis=0) return x with assert_raises(ValueError): buggy_concat() @tf.function def concat_with_padding(): x = tf.zeros([5, 10]) for i in tf.range(5): x = tf.concat([x[:i], tf.ones([1, 10]), tf.zeros([4-i, 10])], axis=0) x.set_shape([5, 10]) return x concat_with_padding() ```
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt %matplotlib inline %config InlineBackend.figure_format = "retina" # print(plt.style.available) plt.style.use("ggplot") # plt.style.use("fivethirtyeight") plt.style.use("seaborn-talk") from tqdm import tnrange, tqdm_notebook def uniform_linear_array(n_mics, spacing): return spacing*np.arange(-(n_mics-1)/2, (n_mics-1)/2+1).reshape(1, n_mics) def compute_MVDR_weight(source_steering_vector, signals): snapshot = signals.shape[1] sample_covariance_matrix = signals.dot(signals.transpose().conjugate()) / snapshot inverse_sample_covariance_matrix = np.linalg.inv(sample_covariance_matrix) normalization_factor = (source_steering_vector.transpose().conjugate().dot(inverse_sample_covariance_matrix).dot(source_steering_vector)) weight = inverse_sample_covariance_matrix.dot(source_steering_vector) / normalization_factor return weight def compute_steering_vector_ULA(u, microphone_array): return np.exp(1j*2*np.pi*microphone_array.geometry*u).reshape((microphone_array.n_mics, 1)) def generate_gaussian_samples(power, shape): return np.sqrt(power/2)*np.random.randn(shape[0], shape[1]) + 1j*np.sqrt(power/2)*np.random.randn(shape[0], shape[1]); # signal samples class MicrophoneArray(): def __init__(self, array_geometry): self.dim = array_geometry.shape[0] self.n_mics = array_geometry.shape[1] self.geometry = array_geometry class BaseDLBeamformer(object): def __init__(self, vs, bf_type="MVDR"): """ Parameters ---------- vs: Source manifold array vector bf_type: Type of beamformer """ self.vs = vs self.bf_type = bf_type self.weights_ = None def _compute_weights(self, training_data): n_training_samples = len(training_data) n_mics, snapshot = training_data[0].shape D = np.zeros((n_mics, n_training_samples), dtype=complex) for i_training_sample in range(n_training_samples): nv = training_data[i_training_sample] if self.bf_type == "MVDR": w = compute_MVDR_weight(vs, nv) D[:, i_training_sample] = w.reshape(n_mics,) return D def _initialize(self, X): pass def _choose_weights(self, x): n_dictionary_atoms = self.weights_.shape[1] R = x.dot(x.transpose().conjugate()) proxy = np.diagonal(self.weights_.transpose().conjugate().dot(R).dot(self.weights_)) optimal_weight_index = np.argmin(proxy) return self.weights_[:, optimal_weight_index] def fit(self, training_data): """ Parameters ---------- X: shape = [n_samples, n_features] """ D = self._compute_weights(training_data) self.weights_ = D return self def choose_weights(self, x): return self._choose_weights(x) ``` #### Setup ``` array_geometry = uniform_linear_array(n_mics=10, spacing=0.5) microphone_array = MicrophoneArray(array_geometry) us = 0 vs = compute_steering_vector_ULA(us, microphone_array) SNRs = np.arange(0, 31, 10) n_SNRs = len(SNRs) sigma_n = 1 ``` #### Training data ``` n_training_samples = 5000 training_snapshots = [10, 50, 1000] interference_powers = [10, 20, 30] n_interference_list = [1, 2, 3] # interference_powers = [20] # n_interference_list = [1] # sigma = 10**(20/10) training_noise_interference_data_various_snapshots = [] for training_snapshot in training_snapshots: training_noise_interference_data = [] for i_training_sample in range(n_training_samples): n_interferences = np.random.choice(n_interference_list) nv = np.zeros((microphone_array.n_mics, training_snapshot), dtype=complex) for _ in range(n_interferences): u = np.random.uniform(0, 1) vi = compute_steering_vector_ULA(u, microphone_array) sigma = 10**(np.random.choice(interference_powers)/10) ii = generate_gaussian_samples(power=sigma, 
shape=(1, training_snapshot)) nv += vi.dot(ii) noise = generate_gaussian_samples(power=sigma_n, shape=(microphone_array.n_mics, training_snapshot)) nv += noise training_noise_interference_data.append(nv) training_noise_interference_data_various_snapshots.append(training_noise_interference_data) ``` #### Train baseline dictionary ``` dictionaries = [] for i_training_snapshot in range(len(training_snapshots)): training_noise_interference_data = training_noise_interference_data_various_snapshots[i_training_snapshot] dictionary = BaseDLBeamformer(vs) dictionary.fit(training_noise_interference_data); dictionaries.append(dictionary) ``` #### Testing ``` n_trials = 200 snapshots = np.array([10, 20, 30, 40, 60, 100, 200, 500, 1000]) n_snapshots = len(snapshots) ui1 = np.random.uniform(0, 1) ui2 = np.random.uniform(0, 1) sigma_1 = 10**(20/10) sigma_2 = 0*10**(20/10) vi1 = compute_steering_vector_ULA(ui1, microphone_array) vi2 = compute_steering_vector_ULA(ui2, microphone_array) n_interferences = np.random.choice(n_interference_list) interference_steering_vectors = [] for _ in range(n_interferences): u = np.random.uniform(0, 1) vi = compute_steering_vector_ULA(u, microphone_array) interference_steering_vectors.append(vi) sinr_snr_mvdr = np.zeros((n_SNRs, n_snapshots)) sinr_snr_mpdr = np.zeros((n_SNRs, n_snapshots)) sinr_snr_baseline_mpdr = np.zeros((len(training_snapshots), n_SNRs, n_snapshots)) for i_SNR in tqdm_notebook(range(n_SNRs), desc="SNRs"): sigma_s = 10**(SNRs[i_SNR] / 10) Rs = sigma_s * vs.dot(vs.transpose().conjugate()) for i_snapshot in tqdm_notebook(range(n_snapshots), desc="Snapshots", leave=False): snapshot = snapshots[i_snapshot] sinr_mvdr = np.zeros(n_trials) sinr_mpdr = np.zeros(n_trials) sinr_baseline_mpdr = np.zeros((len(training_snapshots), n_trials)) for i_trial in range(n_trials): ss = generate_gaussian_samples(power=sigma_s, shape=(1, snapshot)) # signal samples nn = generate_gaussian_samples(power=sigma_n, shape=(microphone_array.n_mics, snapshot)) # Gaussian noise samples # ii1 = generate_gaussian_samples(power=sigma_1, shape=(1, snapshot)) # first interference samples # ii2 = generate_gaussian_samples(power=sigma_2, shape=(1, snapshot)) # second interference samples nv = np.zeros((microphone_array.n_mics, snapshot), dtype=complex) Rn = np.zeros((microphone_array.n_mics, microphone_array.n_mics), dtype=complex) for i_interference in range(n_interferences): sigma = 10**(np.random.choice(interference_powers)/10) ii = generate_gaussian_samples(power=sigma, shape=(1, snapshot)) nv += interference_steering_vectors[i_interference].dot(ii) Rn += sigma*interference_steering_vectors[i_interference].dot(interference_steering_vectors[i_interference].transpose().conjugate()) Rn += sigma_n*np.identity(microphone_array.n_mics) Rninv = np.linalg.inv(Rn) Wo = Rninv.dot(vs) / (vs.transpose().conjugate().dot(Rninv).dot(vs)) SINRopt = ( np.real(Wo.transpose().conjugate().dot(Rs).dot(Wo)) / np.real(Wo.transpose().conjugate().dot(Rn).dot(Wo)) )[0][0] nv += nn sv = vs.dot(ss) xx = sv + nv wv = compute_MVDR_weight(vs, nv) wp = compute_MVDR_weight(vs, xx) for i_dictionary in range(len(dictionaries)): dictionary = dictionaries[i_dictionary] w_baseline_p = dictionary.choose_weights(xx) sinr_baseline_mpdr[i_dictionary, i_trial] = np.real(w_baseline_p.transpose().conjugate().dot(Rs).dot(w_baseline_p)) / np.real(w_baseline_p.transpose().conjugate().dot(Rn).dot(w_baseline_p)) sinr_mvdr[i_trial] = np.real(wv.transpose().conjugate().dot(Rs).dot(wv)) / np.real(wv.transpose().conjugate().dot(Rn).dot(wv)) 
sinr_mpdr[i_trial] = np.real(wp.transpose().conjugate().dot(Rs).dot(wp)) / np.real(wp.transpose().conjugate().dot(Rn).dot(wp)) sinr_snr_mvdr[i_SNR, i_snapshot] = np.sum(sinr_mvdr) / n_trials sinr_snr_mpdr[i_SNR, i_snapshot] = np.sum(sinr_mpdr) / n_trials for i_dictionary in range(len(dictionaries)): sinr_snr_baseline_mpdr[i_dictionary, i_SNR, i_snapshot] = np.sum(sinr_baseline_mpdr[i_dictionary, :]) / n_trials ``` #### Visualize results ``` fig = plt.figure(figsize=(9, 6*n_SNRs)); for i_SNR in range(n_SNRs): sigma_s = 10**(SNRs[i_SNR] / 10) Rs = sigma_s * vs.dot(vs.transpose().conjugate()) SINRopt = ( np.real(Wo.transpose().conjugate().dot(Rs).dot(Wo)) / np.real(Wo.transpose().conjugate().dot(Rn).dot(Wo)) )[0][0] ax = fig.add_subplot(n_SNRs, 1, i_SNR+1) ax.semilogx(snapshots, 10*np.log10(sinr_snr_mvdr[i_SNR, :]), marker="o", label="MVDR") ax.semilogx(snapshots, 10*np.log10(sinr_snr_mpdr[i_SNR, :]), marker="*", label="MPDR") for i_training_snapshot in range(len(training_snapshots)): ax.semilogx(snapshots, 10*np.log10(sinr_snr_baseline_mpdr[i_training_snapshot, i_SNR, :]), label="Baseline - {} training snapshots".format(training_snapshots[i_training_snapshot])) ax.set_xlim(10, 1000); ax.set_ylim(-10, 45) ax.legend(loc="lower right") ax.set_xlabel("Number of snapshots") ax.set_ylabel(r"$SINR_0$ [dB]") ax.set_title("Testing performance, {} training samples".format(n_training_samples)) plt.tight_layout() fig.savefig("baseline_dl_mvdr_various_interferences.jpg", dpi=600) ```
github_jupyter
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.png) # Automated Machine Learning _**Orange Juice Sales Forecasting**_ ## Contents 1. [Introduction](#Introduction) 1. [Setup](#Setup) 1. [Compute](#Compute) 1. [Data](#Data) 1. [Train](#Train) 1. [Predict](#Predict) 1. [Operationalize](#Operationalize) ## Introduction In this example, we use AutoML to train, select, and operationalize a time-series forecasting model for multiple time-series. Make sure you have executed the [configuration notebook](../../../configuration.ipynb) before running this notebook. The examples in the follow code samples use the University of Chicago's Dominick's Finer Foods dataset to forecast orange juice sales. Dominick's was a grocery chain in the Chicago metropolitan area. ## Setup ``` import azureml.core import pandas as pd import numpy as np import logging from azureml.core.workspace import Workspace from azureml.core.experiment import Experiment from azureml.train.automl import AutoMLConfig from azureml.automl.core.featurization import FeaturizationConfig ``` This sample notebook may use features that are not available in previous versions of the Azure ML SDK. ``` print("This notebook was created using version 1.19.0 of the Azure ML SDK") print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK") ``` As part of the setup you have already created a <b>Workspace</b>. To run AutoML, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem. ``` ws = Workspace.from_config() # choose a name for the run history container in the workspace experiment_name = 'automl-ojforecasting' experiment = Experiment(ws, experiment_name) output = {} output['Subscription ID'] = ws.subscription_id output['Workspace'] = ws.name output['SKU'] = ws.sku output['Resource Group'] = ws.resource_group output['Location'] = ws.location output['Run History Name'] = experiment_name pd.set_option('display.max_colwidth', -1) outputDf = pd.DataFrame(data = output, index = ['']) outputDf.T ``` ## Compute You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource. #### Creation of AmlCompute takes approximately 5 minutes. If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this article on the default limits and how to request more quota. 
``` from azureml.core.compute import ComputeTarget, AmlCompute from azureml.core.compute_target import ComputeTargetException # Choose a name for your CPU cluster amlcompute_cluster_name = "oj-cluster" # Verify that cluster does not exist already try: compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name) print('Found existing cluster, use it.') except ComputeTargetException: compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2', max_nodes=6) compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config) compute_target.wait_for_completion(show_output=True) ``` ## Data You are now ready to load the historical orange juice sales data. We will load the CSV file into a plain pandas DataFrame; the time column in the CSV is called _WeekStarting_, so it will be specially parsed into the datetime type. ``` time_column_name = 'WeekStarting' data = pd.read_csv("dominicks_OJ.csv", parse_dates=[time_column_name]) data.head() ``` Each row in the DataFrame holds a quantity of weekly sales for an OJ brand at a single store. The data also includes the sales price, a flag indicating if the OJ brand was advertised in the store that week, and some customer demographic information based on the store location. For historical reasons, the data also include the logarithm of the sales quantity. The Dominick's grocery data is commonly used to illustrate econometric modeling techniques where logarithms of quantities are generally preferred. The task is now to build a time-series model for the _Quantity_ column. It is important to note that this dataset is comprised of many individual time-series - one for each unique combination of _Store_ and _Brand_. To distinguish the individual time-series, we define the **time_series_id_column_names** - the columns whose values determine the boundaries between time-series: ``` time_series_id_column_names = ['Store', 'Brand'] nseries = data.groupby(time_series_id_column_names).ngroups print('Data contains {0} individual time-series.'.format(nseries)) ``` For demonstration purposes, we extract sales time-series for just a few of the stores: ``` use_stores = [2, 5, 8] data_subset = data[data.Store.isin(use_stores)] nseries = data_subset.groupby(time_series_id_column_names).ngroups print('Data subset contains {0} individual time-series.'.format(nseries)) ``` ### Data Splitting We now split the data into a training and a testing set for later forecast evaluation. The test set will contain the final 20 weeks of observed sales for each time-series. The splits should be stratified by series, so we use a group-by statement on the time series identifier columns. ``` n_test_periods = 20 def split_last_n_by_series_id(df, n): """Group df by series identifiers and split on last n rows for each group.""" df_grouped = (df.sort_values(time_column_name) # Sort by ascending time .groupby(time_series_id_column_names, group_keys=False)) df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-n]) df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-n:]) return df_head, df_tail train, test = split_last_n_by_series_id(data_subset, n_test_periods) ``` ### Upload data to datastore The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace), is paired with the storage account, which contains the default data store. 
We will use it to upload the train and test data and create [tabular datasets](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training and testing. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into a tabular representation. ``` train.to_csv (r'./dominicks_OJ_train.csv', index = None, header=True) test.to_csv (r'./dominicks_OJ_test.csv', index = None, header=True) datastore = ws.get_default_datastore() datastore.upload_files(files = ['./dominicks_OJ_train.csv', './dominicks_OJ_test.csv'], target_path = 'dataset/', overwrite = True,show_progress = True) ``` ### Create dataset for training ``` from azureml.core.dataset import Dataset train_dataset = Dataset.Tabular.from_delimited_files(path=datastore.path('dataset/dominicks_OJ_train.csv')) train_dataset.to_pandas_dataframe().tail() ``` ## Modeling For forecasting tasks, AutoML uses pre-processing and estimation steps that are specific to time-series. AutoML will undertake the following pre-processing steps: * Detect time-series sample frequency (e.g. hourly, daily, weekly) and create new records for absent time points to make the series regular. A regular time series has a well-defined frequency and has a value at every sample point in a contiguous time span * Impute missing values in the target (via forward-fill) and feature columns (using median column values) * Create features based on time series identifiers to enable fixed effects across different series * Create time-based features to assist in learning seasonal patterns * Encode categorical variables to numeric quantities In this notebook, AutoML will train a single, regression-type model across **all** time-series in a given training set. This allows the model to generalize across related series. If you're looking to train multiple models for different time-series, please see the many-models notebook. You are almost ready to start an AutoML training job. First, we need to separate the target column from the rest of the DataFrame: ``` target_column_name = 'Quantity' ``` ## Customization The featurization customization in forecasting is an advanced feature in AutoML that allows you to change the default forecasting featurization behaviors and column types through `FeaturizationConfig`. The supported scenarios include: 1. Column purposes update: Override the feature type for the specified column. Currently supports DateTime, Categorical and Numeric. This customization can be used when the inferred type of a column does not correctly reflect its purpose. Some numerical columns, for instance, should be treated as Categorical and need to be converted to categorical, while others hold epoch timestamps and need to be converted to datetime. To tell the SDK to preprocess these columns correctly, a configuration needs to be added with the columns and their desired types. 2. Transformer parameters update: Currently supports parameter changes for the Imputer only. Users can customize imputation methods. The supported imputation methods for the target column are constant and ffill (forward fill). The supported imputation methods for feature columns are mean, median, most frequent, constant and ffill (forward fill). This customization can be used when you know which imputation methods fit the input data best. For instance, some datasets use NaN to represent 0, in which case the correct behavior is to impute all missing values with 0. 
To achieve this behavior, these columns need to be configured as constant imputation with `fill_value` 0. 3. Drop columns: Columns to drop from being featurized. These usually are the columns which are leaky or the columns contain no useful data. ``` featurization_config = FeaturizationConfig() featurization_config.drop_columns = ['logQuantity'] # 'logQuantity' is a leaky feature, so we remove it. # Force the CPWVOL5 feature to be numeric type. featurization_config.add_column_purpose('CPWVOL5', 'Numeric') # Fill missing values in the target column, Quantity, with zeros. featurization_config.add_transformer_params('Imputer', ['Quantity'], {"strategy": "constant", "fill_value": 0}) # Fill missing values in the INCOME column with median value. featurization_config.add_transformer_params('Imputer', ['INCOME'], {"strategy": "median"}) # Fill missing values in the Price column with forward fill (last value carried forward). featurization_config.add_transformer_params('Imputer', ['Price'], {"strategy": "ffill"}) ``` ## Forecasting Parameters To define forecasting parameters for your experiment training, you can leverage the ForecastingParameters class. The table below details the forecasting parameter we will be passing into our experiment. |Property|Description| |-|-| |**time_column_name**|The name of your time column.| |**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).| |**time_series_id_column_names**|The column names used to uniquely identify the time series in data that has multiple rows with the same timestamp. If the time series identifiers are not defined, the data set is assumed to be one time series.| ## Train The [AutoMLConfig](https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.automlconfig.automlconfig?view=azure-ml-py) object defines the settings and data for an AutoML training job. Here, we set necessary inputs like the task type, the number of AutoML iterations to try, the training data, and cross-validation parameters. For forecasting tasks, there are some additional parameters that can be set in the `ForecastingParameters` class: the name of the column holding the date/time, the timeseries id column names, and the maximum forecast horizon. A time column is required for forecasting, while the time_series_id is optional. If time_series_id columns are not given, AutoML assumes that the whole dataset is a single time-series. We also pass a list of columns to drop prior to modeling. The _logQuantity_ column is completely correlated with the target quantity, so it must be removed to prevent a target leak. The forecast horizon is given in units of the time-series frequency; for instance, the OJ series frequency is weekly, so a horizon of 20 means that a trained model will estimate sales up to 20 weeks beyond the latest date in the training data for each series. In this example, we set the forecast horizon to the number of samples per series in the test set (n_test_periods). Generally, the value of this parameter will be dictated by business needs. For example, a demand planning application that estimates the next month of sales should set the horizon according to suitable planning time-scales. Please see the [energy_demand notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand) for more discussion of forecast horizon. 
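As a quick, hedged illustration (not an official cell of this notebook; it reuses the `train`, `time_column_name` and `n_test_periods` variables defined above), the forecast window implied by a weekly series and a horizon of 20 can be listed explicitly:
```
import pandas as pd

# last observed week in the training split
last_train_date = train[time_column_name].max()

# a horizon of n_test_periods on a weekly series covers the n_test_periods
# consecutive weeks following the latest training date
forecast_dates = pd.date_range(start=last_train_date + pd.Timedelta(weeks=1),
                               periods=n_test_periods, freq='7D')
print('Forecasting from', forecast_dates.min().date(), 'to', forecast_dates.max().date())
```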
We note here that AutoML can sweep over two types of time-series models: * Models that are trained for each series such as ARIMA and Facebook's Prophet. * Models trained across multiple time-series using a regression approach. In the first case, AutoML loops over all time-series in your dataset and trains one model (e.g. AutoArima or Prophet, as the case may be) for each series. This can result in long runtimes to train these models if there are a lot of series in the data. One way to mitigate this problem is to fit models for different series in parallel if you have multiple compute cores available. To enable this behavior, set the `max_cores_per_iteration` parameter in your AutoMLConfig as shown in the example in the next cell. Finally, a note about the cross-validation (CV) procedure for time-series data. AutoML uses out-of-sample error estimates to select a best pipeline/model, so it is important that the CV fold splitting is done correctly. Time-series can violate the basic statistical assumptions of the canonical K-Fold CV strategy, so AutoML implements a [rolling origin validation](https://robjhyndman.com/hyndsight/tscv/) procedure to create CV folds for time-series data. To use this procedure, you just need to specify the desired number of CV folds in the AutoMLConfig object. It is also possible to bypass CV and use your own validation set by setting the *validation_data* parameter of AutoMLConfig. Here is a summary of AutoMLConfig parameters used for training the OJ model: |Property|Description| |-|-| |**task**|forecasting| |**primary_metric**|This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i> |**experiment_timeout_hours**|Experimentation timeout in hours.| |**enable_early_stopping**|If early stopping is on, training will stop when the primary metric is no longer improving.| |**training_data**|Input dataset, containing both features and label column.| |**label_column_name**|The name of the label column.| |**compute_target**|The remote compute for training.| |**n_cross_validations**|Number of cross-validation folds to use for model/pipeline selection| |**enable_voting_ensemble**|Allow AutoML to create a Voting ensemble of the best performing models| |**enable_stack_ensemble**|Allow AutoML to create a Stack ensemble of the best performing models| |**debug_log**|Log file path for writing debugging information| |**featurization**| 'auto' / 'off' / FeaturizationConfig Indicator for whether featurization step should be done automatically or not, or whether customized featurization should be used. Setting this enables AutoML to perform featurization on the input to handle *missing data*, and to perform some common *feature extraction*.| |**max_cores_per_iteration**|Maximum number of cores to utilize per iteration. 
A value of -1 indicates that all available cores should be used.| ``` from azureml.automl.core.forecasting_parameters import ForecastingParameters forecasting_parameters = ForecastingParameters( time_column_name=time_column_name, forecast_horizon=n_test_periods, time_series_id_column_names=time_series_id_column_names ) automl_config = AutoMLConfig(task='forecasting', debug_log='automl_oj_sales_errors.log', primary_metric='normalized_mean_absolute_error', experiment_timeout_hours=0.25, training_data=train_dataset, label_column_name=target_column_name, compute_target=compute_target, enable_early_stopping=True, featurization=featurization_config, n_cross_validations=3, verbosity=logging.INFO, max_cores_per_iteration=-1, forecasting_parameters=forecasting_parameters) ``` You can now submit a new training run. Depending on the data and number of iterations, this operation may take several minutes. Information from each iteration will be printed to the console. Validation errors and current status will be shown when setting `show_output=True`, and the execution will be synchronous. ``` remote_run = experiment.submit(automl_config, show_output=False) remote_run remote_run.wait_for_completion() ``` ### Retrieve the Best Model Each run within an Experiment stores serialized (i.e. pickled) pipelines from the AutoML iterations. We can now retrieve the pipeline with the best performance on the validation dataset: ``` best_run, fitted_model = remote_run.get_output() print(fitted_model.steps) model_name = best_run.properties['model_name'] ``` ## Transparency View updated featurization summary ``` custom_featurizer = fitted_model.named_steps['timeseriestransformer'] custom_featurizer.get_featurization_summary() ``` # Forecasting Now that we have retrieved the best pipeline/model, it can be used to make predictions on test data. First, we remove the target values from the test set: ``` X_test = test y_test = X_test.pop(target_column_name).values X_test.head() ``` To produce predictions on the test set, we need to know the feature values at all dates in the test set. This requirement is somewhat reasonable for the OJ sales data since the features mainly consist of price, which is usually set in advance, and customer demographics which are approximately constant for each store over the 20 week forecast horizon in the testing data. ``` # forecast returns the predictions and the featurized data, aligned to X_test. # This contains the assumptions that were made in the forecast y_predictions, X_trans = fitted_model.forecast(X_test) ``` If you are used to scikit pipelines, perhaps you expected `predict(X_test)`. However, forecasting requires a more general interface that also supplies the past target `y` values. Please use `forecast(X,y)` as `predict(X)` is reserved for internal purposes on forecasting models. For more details, see the [forecast function notebook](../forecasting-forecast-function/auto-ml-forecasting-function.ipynb). # Evaluate To evaluate the accuracy of the forecast, we'll compare against the actual sales quantities for a selection of metrics, including the mean absolute percentage error (MAPE). We'll add predictions and actuals into a single dataframe for convenience in calculating the metrics. 
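For reference, MAPE itself is simple to compute by hand. Here is a minimal sketch (assuming the `y_test` and `y_predictions` arrays produced above, and skipping zero actuals to avoid division by zero), before using the AutoML scoring module in the next cell:
```
import numpy as np

def mape(actual, predicted):
    # mean absolute percentage error, ignoring zero actuals
    actual = np.asarray(actual, dtype=float)
    predicted = np.asarray(predicted, dtype=float)
    nonzero = actual != 0
    return np.mean(np.abs((actual[nonzero] - predicted[nonzero]) / actual[nonzero])) * 100

print('MAPE = {:.2f}%'.format(mape(y_test, y_predictions)))
```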
``` assign_dict = {'predicted': y_predictions, target_column_name: y_test} df_all = X_test.assign(**assign_dict) from azureml.automl.core.shared import constants from azureml.automl.runtime.shared.score import scoring from matplotlib import pyplot as plt # use automl scoring module scores = scoring.score_regression( y_test=df_all[target_column_name], y_pred=df_all['predicted'], metrics=list(constants.Metric.SCALAR_REGRESSION_SET)) print("[Test data scores]\n") for key, value in scores.items(): print('{}: {:.3f}'.format(key, value)) # Plot outputs %matplotlib inline test_pred = plt.scatter(df_all[target_column_name], df_all['predicted'], color='b') test_test = plt.scatter(df_all[target_column_name], df_all[target_column_name], color='g') plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8) plt.show() ``` # Operationalize _Operationalization_ means getting the model into the cloud so that others can run it after you close the notebook. We will create a Docker container running on Azure Container Instances with the model. ``` description = 'AutoML OJ forecaster' tags = None model = remote_run.register_model(model_name = model_name, description = description, tags = tags) print(remote_run.model_id) ``` ### Develop the scoring script For the deployment, we need a function that will run the forecast on serialized data. It can be obtained from the best_run. ``` script_file_name = 'score_fcast.py' best_run.download_file('outputs/scoring_file_v_1_0_0.py', script_file_name) ``` ### Deploy the model as a Web Service on Azure Container Instances ``` from azureml.core.model import InferenceConfig from azureml.core.webservice import AciWebservice from azureml.core.webservice import Webservice from azureml.core.model import Model inference_config = InferenceConfig(environment = best_run.get_environment(), entry_script = script_file_name) aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1, memory_gb = 2, tags = {'type': "automl-forecasting"}, description = "Automl forecasting sample service") aci_service_name = 'automl-oj-forecast-01' print(aci_service_name) aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig) aci_service.wait_for_deployment(True) print(aci_service.state) aci_service.get_logs() ``` ### Call the service ``` import json X_query = X_test.copy() # We have to convert datetime to string, because Timestamps cannot be serialized to JSON. X_query[time_column_name] = X_query[time_column_name].astype(str) # The Service object accepts a dictionary, which is internally converted to a JSON string. # The section 'data' contains the data frame in the form of a dictionary. test_sample = json.dumps({'data': X_query.to_dict(orient='records')}) response = aci_service.run(input_data = test_sample) # translate from networkese to datascientese try: res_dict = json.loads(response) y_fcst_all = pd.DataFrame(res_dict['index']) y_fcst_all[time_column_name] = pd.to_datetime(y_fcst_all[time_column_name], unit = 'ms') y_fcst_all['forecast'] = res_dict['forecast'] except Exception: print(response) # if parsing fails, show the raw service response y_fcst_all.head() ``` ### Delete the web service if desired ``` serv = Webservice(ws, 'automl-oj-forecast-01') serv.delete() # don't do it accidentally ```
github_jupyter
# Setup ``` %load_ext rpy2.ipython import os from json import loads as jloads from glob import glob import pandas as pd import datetime %%R library(gplots) library(ggplot2) library(ggthemes) library(reshape2) library(gridExtra) library(heatmap.plus) ascols = function(facs, pallette){ facs = facs[,1] ffacs = as.factor(as.character(facs)) n = length(unique(facs)) cols = pallette(n)[ffacs] } greyscale = function(n){ return(rev(gray.colors(n))) } def getsname(filename): return filename.split('/')[-1].split('.')[0] def readJSON(jsonf): return jloads(open(jsonf).read()) ``` # Beta Diversity ``` obj = readJSON('results/olympiome.beta_diversity_stats.json.json') speciesRhoKraken = obj['species']['rho_proportionality']['kraken'] speciesRhoKrakenDF = pd.DataFrame(speciesRhoKraken) speciesJSDKraken = obj['species']['jensen_shannon_distance']['kraken'] speciesJSDKrakenDF = pd.DataFrame(speciesJSDKraken) %%R -i speciesRhoKrakenDF beta.df = as.matrix(speciesRhoKrakenDF) diag(beta.df) = NA heatmap.2(beta.df, trace='none', margins=c(8,8), ColSideColorsSize=3, KeyValueName="Rho Prop.", labCol=F, cexRow=0.8, dendrogram="both", density.info="histogram", col=greyscale) %%R -i speciesJSDKrakenDF beta.df = as.matrix(speciesJSDKrakenDF) diag(beta.df) = NA heatmap.2(beta.df, trace='none', margins=c(8,8), ColSideColorsSize=3, KeyValueName="Rho Prop.", labCol=F, cexRow=0.8, dendrogram="both", density.info="histogram", col=greyscale) ``` # AMR ``` amrclassfs = glob('results/*.resistome_amrs.classus.tsv') def parseF(fname): out = {} with open(fname) as f: f.readline() for line in f: tkns = line.strip().split('\t') out[tkns[1]] = int(tkns[2]) return out amrclass = {getsname(amrclassf): parseF(amrclassf) for amrclassf in amrclassfs} amrclass = pd.DataFrame(amrclass).fillna(0).transpose() amrclass.shape %%R -i amrclass amr.df = t(as.matrix(amrclass)) heatmap.2(amr.df, trace='none', margins=c(8,8), ColSideColorsSize=3, KeyValueName="Rho Prop.", cexCol=0.8, cexRow=0.8, dendrogram="both", density.info="histogram", col=greyscale) ``` # Virulence Factors ``` virfs = glob('results/*.vfdb_quantify.table.tsv') virs = {getsname(virf): pd.read_csv(virf).set_index('Unnamed: 0').transpose() for virf in virfs} virpan = pd.Panel(virs).transpose(2,0,1) #vrpkm = virpan['RPKM'].fillna(0).apply(pd.to_numeric) vrpkmg = virpan['RPKMG'].fillna(0).apply(pd.to_numeric) vrpkmghigh = vrpkmg.transpose().loc[vrpkmg.mean(axis=0) > 200] vrpkmghigh.shape %%R -i vrpkmghigh vir.df = as.matrix(vrpkmghigh) heatmap.2(vir.df, trace='none', margins=c(8,8), ColSideColorsSize=3, KeyValueName="Rho Prop.", cexCol=0.8, cexRow=0.8, dendrogram="both", density.info="histogram", col=greyscale) ``` # Virulence vs AMR ``` virlevels = vrpkmg.transpose().mean() amrlevels = amrclass.transpose().mean().loc[virlevels.index] %%R -i virlevels -i amrlevels df = cbind(virlevels, amrlevels) colnames(df) = c("virulence", "antimicrobial") df = as.data.frame(df) ggplot(df, aes(virulence, antimicrobial)) + geom_point() + geom_rug() + theme_tufte(ticks=F) + xlab("Total Virulence") + ylab("Total AMR") + theme(axis.title.x = element_text(vjust=-0.5), axis.title.y = element_text(vjust=1)) ``` # Alpha Diversity ``` adivfs = glob('results/*.alpha_diversity_stats.json.json') adivs = {getsname(adivf): readJSON(adivf) for adivf in adivfs} chaoSpecies = {} shanSpecies = {} richSpecies = {} for sname, adiv in adivs.items(): chaoSpecies[sname] = adiv['kraken']['species']['chao1'] shanSpecies[sname] = adiv['kraken']['species']['shannon_index'] richSpecies[sname] = 
adiv['kraken']['species']['richness'] chaoSpeciesDF = pd.DataFrame(chaoSpecies).fillna(0) shanSpeciesDF = pd.DataFrame(shanSpecies).fillna(0) richSpeciesDF = pd.DataFrame(richSpecies).fillna(0) shanSpeciesDF.loc['500000'].sort_values() ``` # HMP Comparison ``` hmpfs = glob('results/*.hmp_site_dists.metaphlan2.json') def crunch(obj): out = {} for k, v in obj.items(): out[k] = sum(v) / len(v) return out hmps = {getsname(hmpf): crunch(readJSON(hmpf)) for hmpf in hmpfs} hmps = pd.DataFrame(hmps).transpose() %%R -i hmps hmp.df = melt(hmps) ggplot(hmp.df, aes(x=variable, y=value)) + theme_tufte() + geom_boxplot() + ylab('Cosine Similarity to HMP Sites') + xlab('Body Site') ``` # Taxonomy ``` krakfs = glob('results/*.kraken_taxonomy_profiling.mpa.mpa.tsv') def parseKrakF(krakf): out = {} with open(krakf) as kf: for line in kf: tkns = line.strip().split() taxa = tkns[0] if ('g__' in taxa) and ('s__' not in taxa): key = taxa.split('g__')[-1] out[key] = int(tkns[1]) return out def getTopN(vec, n): tups = vec.items() tups = sorted(tups, key=lambda x: -x[1]) out = {k: v for k, v in tups[:n]} return out krak10 = {getsname(krakf): getTopN(parseKrakF(krakf), 10) for krakf in krakfs} krak10 = pd.DataFrame(krak10).fillna(0).transpose() %%R -i krak10 krak.df = t(as.matrix(krak10)) krak.df = log(krak.df) krak.df[!is.finite(krak.df)] = 0 heatmap.2(krak.df, trace='none', margins=c(8,8), ColSideColorsSize=3, KeyValueName="Rho Prop.", cexCol=0.8, cexRow=0.7, dendrogram="both", density.info="histogram", col=greyscale) ```
github_jupyter
``` import cv2 import os import numpy from PIL import Image import matplotlib.pyplot as plt # !tar -xf EnglishHnd.tgz # !mv English/Hnd ./ # !rm -rf Hnd/Trj/ # !mv Hnd/Img/* Hnd/ # !rm -rf Hnd/Img # !rm -rf English # !rm -rf Hnd label_list = ['0','1','2','3','4','5','6','7','8','9', 'A','B','C','D','E','F','G','H', 'I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','fail'] # # count = 0 # os.remove("./Hnd/all.txt~") # for cc in os.listdir("./Hnd"): # count = cc[-2:] # os.rename('Hnd/' + cc, 'Hnd/' + label_list[int(count)-1]) import torch from torch.utils.data import Dataset from torchvision import datasets from torchvision import transforms import matplotlib.pyplot as plt from torchvision.io import read_image transform = transforms.Compose( [ # transforms.ToPILImage(), transforms.Grayscale(), transforms.Resize((28,28)), transforms.ToTensor(), # transforms.Normalize((0.5), (0.5)), ] ) def load_dataset(): data_path = './Img/' train_dataset = datasets.ImageFolder( root=data_path, transform=transform ) # train_dataset = datasets.EMNIST(root= "./data",split="byclass", train = True, download = True, transform = transform) train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=64, num_workers=2, shuffle=True ) return train_loader # for batch_idx, (data, target) in enumerate(load_dataset()): # print(batch_idx) dataiter = iter(load_dataset()) images, labels = dataiter.next() print(images.shape) print(labels.shape) figure = plt.figure() num_of_images = 60 for index in range(1, num_of_images + 1): plt.subplot(6, 10, index) plt.axis('off') plt.imshow(images[index].numpy().squeeze(), cmap='gray_r') load_dataset() device = 'cuda' if torch.cuda.is_available() else 'cpu' device # defining the model architecture class Net(torch.nn.Module): def __init__(self): super(Net, self).__init__() self.cnn_layers = torch.nn.Sequential( # Defining a 2D convolution layer torch.nn.Conv2d(1, 128, kernel_size=3, stride=1, padding=1), torch.nn.BatchNorm2d(128), torch.nn.ReLU(inplace=True), torch.nn.MaxPool2d(kernel_size=2, stride=2), # Defining another 2D convolution layer torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1), torch.nn.BatchNorm2d(128), torch.nn.ReLU(inplace=True), torch.nn.MaxPool2d(kernel_size=2, stride=2), ) self.linear_layers = torch.nn.Sequential( torch.nn.Linear(128 * 7 * 7, 63) ) # Defining the forward pass def forward(self, x): x = self.cnn_layers(x) x = x.view(x.size(0), -1) # print(x.size) x = self.linear_layers(x) return x model = Net() # optimizer = torch.optim.Adam(model.parameters(), lr = 0.05) optimizer = torch.optim.SGD(model.parameters(), lr=0.005, momentum=0.9) cc = torch.nn.CrossEntropyLoss() model.cuda() cc = cc.cuda() model for i in range(30): running_loss = 0 for batch_idx, (images, labels) in enumerate(load_dataset()): if torch.cuda.is_available(): images = images.cuda() labels = labels.cuda() # Training pass optimizer.zero_grad() output = model(images) loss = cc(output, labels) #This is where the model learns by backpropagating loss.backward() #And optimizes its weights here optimizer.step() running_loss += loss.item() else: print("Epoch {} - Training loss: {}".format(i+1, running_loss/len(load_dataset()))) torch.save(model, './model_character_detect.pt') running_loss image = Image.open("../../Images/segmentation2/image_2_ROI_5.png") image mm = torch.load('./model_character_detect.pt') image = image.resize((28,28)) # from PIL import 
ImageOps # # image = ImageOps.grayscale(image) image = transform(image) image = image.cuda() mm # image = image.cuda() lp = mm(image[None, ...]) ps = torch.exp(lp) probab = list(ps.cpu()[0]) pred_label = probab.index(max(probab)) pred_label max(probab) label_list[pred_label] ```
github_jupyter
``` !pip install datasets -q !pip install sagemaker -U -q !pip install s3fs==0.4.2 -U -q ``` ### Load dataset and have a peak: This cell is required in SageMaker Studio, otherwise the download of the dataset will throw an error. After running this cell, the kernel needs to be restarted. After restarting tthe kernel, continue with the cell below (loading the dataset) ``` %%capture import IPython !conda install -c conda-forge ipywidgets -y IPython.Application.instance().kernel.do_shutdown(True) from datasets import load_dataset import pandas as pd dataset = load_dataset('ade_corpus_v2', 'Ade_corpus_v2_classification') df = pd.DataFrame(dataset['train']) df.sample(5, random_state=124) ``` ### Determine ratio of positive ADE phrases compared to total dataset ``` df['label'].sum()/len(df) ``` ### Initialise Sagemaker variables and create S3 bucket ``` from sagemaker.huggingface.processing import HuggingFaceProcessor import sagemaker from sagemaker import get_execution_role sess = sagemaker.Session() role = sagemaker.get_execution_role() bucket = f"az-ade-{sess.account_id()}" sess._create_s3_bucket_if_it_does_not_exist(bucket_name=bucket, region=sess._region_name) ``` ### Save the name of the S3 bucket for later sessions ``` %store bucket ``` ### Set up processing job ``` hf_processor = HuggingFaceProcessor( role=role, instance_type="ml.p3.2xlarge", transformers_version='4.6', base_job_name="az-ade", pytorch_version='1.7', instance_count=1, ) from sagemaker.processing import ProcessingInput, ProcessingOutput outputs=[ ProcessingOutput(output_name="train_data", source="/opt/ml/processing/training", destination=f"s3://{bucket}/processing_output/train_data"), ProcessingOutput(output_name="validation_data", source="/opt/ml/processing/validation", destination=f"s3://{bucket}/processing_output/validation_data"), ProcessingOutput(output_name="test_data", source="/opt/ml/processing/test", destination=f"s3://{bucket}/processing_output/test_data"), ] arguments = ["--dataset-name", "ade_corpus_v2", "--datasubset-name", "Ade_corpus_v2_classification", "--model-name", "distilbert-base-uncased", "--train-ratio", "0.7", "--val-ratio", "0.15",] hf_processor.run( code="scripts/preprocess.py", outputs=outputs, arguments=arguments ) preprocessing_job_description = hf_processor.jobs[-1].describe() output_config = preprocessing_job_description['ProcessingOutputConfig'] for output in output_config['Outputs']: print(output['S3Output']['S3Uri']) ```
github_jupyter
# IllusTrip: Text to Video 3D Part of [Aphantasia](https://github.com/eps696/aphantasia) suite, made by Vadim Epstein [[eps696](https://github.com/eps696)] Based on [CLIP](https://github.com/openai/CLIP) + FFT/pixel ops from [Lucent](https://github.com/greentfrapp/lucent). 3D part by [deKxi](https://twitter.com/deKxi), based on [AdaBins](https://github.com/shariqfarooq123/AdaBins) depth. thanks to [Ryan Murdock](https://twitter.com/advadnoun), [Jonathan Fly](https://twitter.com/jonathanfly), [@eduwatch2](https://twitter.com/eduwatch2) for ideas. ## Features * continuously processes **multiple sentences** (e.g. illustrating lyrics or poems) * makes **videos**, evolving with pan/zoom/rotate motion * works with [inverse FFT](https://github.com/greentfrapp/lucent/blob/master/lucent/optvis/param/spatial.py) representation of the image or **directly with RGB** pixels (no GANs involved) * generates massive detailed textures (a la deepdream), **unlimited resolution** * optional **depth** processing for 3D look * various CLIP models * can start/resume from an image **Run the cell below after each session restart** Ensure that you're given Tesla T4/P4/P100 GPU, not K80! ``` #@title General setup !pip install ftfy==5.8 transformers !pip install gputil ffpb try: !pip3 install googletrans==3.1.0a0 from googletrans import Translator, constants translator = Translator() except: pass # !apt-get -qq install ffmpeg work_dir = '/content/illustrip' import os os.makedirs(work_dir, exist_ok=True) %cd $work_dir import os import io import time import math import random import imageio import numpy as np import PIL from base64 import b64encode import shutil from easydict import EasyDict as edict a = edict() import torch import torch.nn as nn import torch.nn.functional as F import torchvision from torchvision import transforms as T from torch.autograd import Variable from IPython.display import HTML, Image, display, clear_output from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" import ipywidgets as ipy from google.colab import output, files import warnings warnings.filterwarnings("ignore") !pip install git+https://github.com/openai/CLIP.git --no-deps import clip !pip install sentence_transformers from sentence_transformers import SentenceTransformer !pip install kornia import kornia !pip install lpips import lpips !pip install PyWavelets==1.1.1 !pip install git+https://github.com/fbcotter/pytorch_wavelets %cd /content !rm -rf aphantasia !git clone https://github.com/eps696/aphantasia %cd aphantasia/ from clip_fft import to_valid_rgb, fft_image, rfft2d_freqs, img2fft, pixel_image, un_rgb from utils import basename, file_list, img_list, img_read, txt_clean, plot_text, old_torch from utils import slice_imgs, derivat, pad_up_to, slerp, checkout, sim_func, latent_anima import transforms import depth from progress_bar import ProgressIPy as ProgressBar shutil.copy('mask.jpg', work_dir) depth_mask_file = os.path.join(work_dir, 'mask.jpg') clear_output() def save_img(img, fname=None): img = np.array(img)[:,:,:] img = np.transpose(img, (1,2,0)) img = np.clip(img*255, 0, 255).astype(np.uint8) if fname is not None: imageio.imsave(fname, np.array(img)) imageio.imsave('result.jpg', np.array(img)) def makevid(seq_dir, size=None): char_len = len(basename(img_list(seq_dir)[0])) out_sequence = seq_dir + '/%0{}d.jpg'.format(char_len) out_video = seq_dir + '.mp4' print('.. 
generating video ..') !ffmpeg -y -v warning -i $out_sequence -crf 18 $out_video data_url = "data:video/mp4;base64," + b64encode(open(out_video,'rb').read()).decode() wh = '' if size is None else 'width=%d height=%d' % (size, size) return """<video %s controls><source src="%s" type="video/mp4"></video>""" % (wh, data_url) # Hardware check !ln -sf /opt/bin/nvidia-smi /usr/bin/nvidia-smi import GPUtil as GPU gpu = GPU.getGPUs()[0] # XXX: only one GPU on Colab and isn’t guaranteed !nvidia-smi -L print("GPU RAM {0:.0f}MB | Free {1:.0f}MB)".format(gpu.memoryTotal, gpu.memoryFree)) #@title Load inputs #@markdown **Content** (either type a text string, or upload a text file): content = "" #@param {type:"string"} upload_texts = False #@param {type:"boolean"} #@markdown **Style** (either type a text string, or upload a text file): style = "" #@param {type:"string"} upload_styles = False #@param {type:"boolean"} #@markdown For non-English languages use Google translation: translate = False #@param {type:"boolean"} #@markdown Resume from the saved `.pt` snapshot, or from an image #@markdown (resolution settings below will be ignored in this case): if upload_texts: print('Upload main text file') uploaded = files.upload() text_file = list(uploaded)[0] texts = list(uploaded.values())[0].decode().split('\n') texts = [tt.strip() for tt in texts if len(tt.strip())>0 and tt[0] != '#'] print(' main text:', text_file, len(texts), 'lines') workname = txt_clean(basename(text_file)) else: texts = [content] workname = txt_clean(content)[:44] if upload_styles: print('Upload styles text file') uploaded = files.upload() text_file = list(uploaded)[0] styles = list(uploaded.values())[0].decode().split('\n') styles = [tt.strip() for tt in styles if len(tt.strip())>0 and tt[0] != '#'] print(' styles:', text_file, len(styles), 'lines') else: styles = [style] resume = False #@param {type:"boolean"} if resume: print('Upload file to resume from') resumed = files.upload() resumed_filename = list(resumed)[0] resumed_bytes = list(resumed.values())[0] assert len(texts) > 0 and len(texts[0]) > 0, 'No input text[s] found!' tempdir = os.path.join(work_dir, workname) os.makedirs(tempdir, exist_ok=True) print('main dir', tempdir) ``` **`content`** (what to draw) is your primary input; **`style`** (how to draw) is optional, if you want to separate such descriptions. If you load text file[s], the imagery will interpolate from line to line (ensure equal line counts for content and style lists, for their accordance). ``` #@title Google Drive [optional] #@markdown Run this cell, if you want to store results on your Google Drive. using_GDrive = True#@param{type:"boolean"} if using_GDrive: import os from google.colab import drive if not os.path.isdir('/G/MyDrive'): drive.mount('/G', force_remount=True) gdir = '/G/MyDrive' tempdir = os.path.join(gdir, 'illustrip', workname) os.makedirs(tempdir, exist_ok=True) print('main dir', tempdir) #@title Main settings sideX = 1280 #@param {type:"integer"} sideY = 720 #@param {type:"integer"} steps = 200 #@param {type:"integer"} frame_step = 100 #@param {type:"integer"} #@markdown > Config method = 'RGB' #@param ['FFT', 'RGB'] model = 'ViT-B/32' #@param ['ViT-B/16', 'ViT-B/32', 'RN101', 'RN50x16', 'RN50x4', 'RN50'] # Default settings if method == 'RGB': align = 'overscan' colors = 2 contrast = 1.2 sharpness = -1. aug_noise = 0. smooth = False else: align = 'uniform' colors = 1.8 contrast = 1.1 sharpness = 1. aug_noise = 2. smooth = True interpolate_topics = True style_power = 1. 
samples = 200 save_step = 1 learning_rate = 1. aug_transform = 'custom' similarity_function = 'cossim' macro = 0.4 enforce = 0. expand = 0. zoom = 0.012 shift = 10 rotate = 0.8 distort = 0.3 animate_them = True sample_decrease = 1. DepthStrength = 0. print(' loading CLIP model..') model_clip, _ = clip.load(model, jit=old_torch()) modsize = model_clip.visual.input_resolution xmem = {'ViT-B/16':0.25, 'RN50':0.5, 'RN50x4':0.16, 'RN50x16':0.06, 'RN101':0.33} if model in xmem.keys(): sample_decrease *= xmem[model] clear_output() print(' using CLIP model', model) ``` **`FFT`** method uses inverse FFT representation of the image. It allows flexible motion, but is either blurry (if smoothed) or noisy (if not). **`RGB`** method directly optimizes image pixels (without FFT parameterization). It's more clean and stable, when zooming in. There are few choices for CLIP `model` (results do vary!). I prefer ViT-B/32 for consistency, next best bet is ViT-B/16. **`steps`** defines the length of animation per text line (multiply it to the inputs line count to get total video duration in frames). `frame_step` sets frequency of the changes in animation (how many frames between motion keypoints). ## Other settings [optional] ``` #@title Run this cell to override settings, if needed #@markdown [to roll back defaults, run "Main settings" cell again] style_power = 1. #@param {type:"number"} overscan = True #@param {type:"boolean"} align = 'overscan' if overscan else 'uniform' interpolate_topics = True #@param {type:"boolean"} #@markdown > Look colors = 2 #@param {type:"number"} contrast = 1.2 #@param {type:"number"} sharpness = 0. #@param {type:"number"} #@markdown > Training samples = 200 #@param {type:"integer"} save_step = 1 #@param {type:"integer"} learning_rate = 1. #@param {type:"number"} #@markdown > Tricks aug_transform = 'custom' #@param ['elastic', 'custom', 'none'] aug_noise = 0. #@param {type:"number"} macro = 0.4 #@param {type:"number"} enforce = 0. #@param {type:"number"} expand = 0. #@param {type:"number"} similarity_function = 'cossim' #@param ['cossim', 'spherical', 'mixed', 'angular', 'dot'] #@markdown > Motion zoom = 0.012 #@param {type:"number"} shift = 10 #@param {type:"number"} rotate = 0.8 #@param {type:"number"} distort = 0.3 #@param {type:"number"} animate_them = True #@param {type:"boolean"} smooth = True #@param {type:"boolean"} if method == 'RGB': smooth = False ``` `style_power` controls the strength of the style descriptions, comparing to the main input. `overscan` provides better frame coverage (needed for RGB method). `interpolate_topics` changes the subjects smoothly, otherwise they're switched by cut, making sharper transitions. Decrease **`samples`** if you face OOM (it's the main RAM eater), or just to speed up the process (with the cost of quality). `save_step` defines, how many optimization steps are taken between saved frames. Set it >1 for stronger image processing. Experimental tricks: `aug_transform` applies some augmentations, which quite radically change the output of this method (and slow down the process). Try yourself to see which is good for your case. `aug_noise` augmentation [FFT only!] seems to enhance optimization with transforms. `macro` boosts bigger forms. `enforce` adds more details by enforcing similarity between two parallel samples. `expand` boosts diversity (up to irrelevant) by enforcing difference between prev/next samples. Motion section: `shift` is in pixels, `rotate` in degrees. The values will be used as limits, if you mark `animate_them`. 
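As a rough sanity check before committing to a long run, the expected clip length can be estimated from these settings. This is a hedged sketch, not a cell from the original notebook: it assumes one saved frame per step, the `texts`/`styles` lists and `steps` variable defined above, and ffmpeg's default 25 fps (no frame rate is passed when the video is assembled).
```
# hypothetical estimate of the output video length
fps = 25                                              # ffmpeg default when -r is not given
total_frames = max(len(texts), len(styles)) * steps   # one saved frame per step, per text line
print('%d frames ~ %.1f seconds of video' % (total_frames, total_frames / fps))
```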
`smooth` reduces blinking, but induces motion blur with subtle screen-fixed patterns (valid only for FFT method, disabled for RGB). ## Add 3D depth [optional] ``` ### deKxi:: This whole cell contains most of whats needed, # with just a few changes to hook it up via frame_transform # (also glob_step now as global var) # I highly recommend performing the frame transformations and depth *after* saving, # (or just the depth warp if you prefer to keep the other affines as they are) # from my testing it reduces any noticeable stretching and allows the new areas # revealed from the changed perspective to be filled/detailed # pretrained models: Nyu is much better but Kitti is an option too depth_model = 'nyu' # @ param ["nyu","kitti"] DepthStrength = 0.01 #@param{type:"number"} MaskBlurAmt = 33 #@param{type:"integer"} save_depth = False #@param{type:"boolean"} size = (sideY,sideX) #@markdown NB: depth computing may take up to ~3x more time. Read the comments inside for more info. #@markdown Courtesy of [deKxi](https://twitter.com/deKxi) if DepthStrength > 0: if not os.path.exists("AdaBins_nyu.pt"): !gdown https://drive.google.com/uc?id=1lvyZZbC9NLcS8a__YPcUP7rDiIpbRpoF if not os.path.exists('AdaBins_nyu.pt'): !wget https://www.dropbox.com/s/tayczpcydoco12s/AdaBins_nyu.pt # if depth_model=='kitti' and not os.path.exists(os.path.join(workdir_depth, "pretrained/AdaBins_kitti.pt")): # !gdown https://drive.google.com/uc?id=1HMgff-FV6qw1L0ywQZJ7ECa9VPq1bIoj if save_depth: depthdir = os.path.join(tempdir, 'depth') os.makedirs(depthdir, exist_ok=True) print('depth dir', depthdir) else: depthdir = None depth_infer, depth_mask = depth.init_adabins(model_path='AdaBins_nyu.pt', mask_path='mask.jpg', size=size) def depth_transform(img_t, img_np, depth_infer, depth_mask, size, depthX=0, scale=1., shift=[0,0], colors=1, depth_dir=None, save_num=0): # d X/Y define the origin point of the depth warp, effectively a "3D pan zoom", [-1..1] # plus = look ahead, minus = look aside dX = 100. * shift[0] / size[1] dY = 100. * shift[1] / size[0] # dZ = movement direction: 1 away (zoom out), 0 towards (zoom in), 0.5 stay dZ = 0.5 + 23. 
* (scale[0]-1) # dZ += 0.5 * float(math.sin(((save_num % 70)/70) * math.pi * 2)) if img_np is None: img2 = img_t.clone().detach() par, imag, _ = pixel_image(img2.shape, resume=img2) img2 = to_valid_rgb(imag, colors=colors)() img2 = img2.detach().cpu().numpy()[0] img2 = (np.transpose(img2, (1,2,0))) # [h,w,c] img2 = np.clip(img2*255, 0, 255).astype(np.uint8) image_pil = T.ToPILImage()(img2) del img2 else: image_pil = T.ToPILImage()(img_np) size2 = [s//2 for s in size] img = depth.depthwarp(img_t, image_pil, depth_infer, depth_mask, size2, depthX, [dX,dY], dZ, rescale=0.5, clip_range=2, save_path=depth_dir, save_num=save_num) return img ``` ## Generate ``` #@title Generate if aug_transform == 'elastic': trform_f = transforms.transforms_elastic sample_decrease *= 0.95 elif aug_transform == 'custom': trform_f = transforms.transforms_custom sample_decrease *= 0.95 else: trform_f = transforms.normalize() if enforce != 0: sample_decrease *= 0.5 samples = int(samples * sample_decrease) print(' using %s method, %d samples' % (method, samples)) if translate: translator = Translator() def enc_text(txt): if translate: txt = translator.translate(txt, dest='en').text emb = model_clip.encode_text(clip.tokenize(txt).cuda()[:77]) return emb.detach().clone() # Encode inputs count = 0 # max count of texts and styles key_txt_encs = [enc_text(txt) for txt in texts] count = max(count, len(key_txt_encs)) key_styl_encs = [enc_text(style) for style in styles] count = max(count, len(key_styl_encs)) assert count > 0, "No inputs found!" # !rm -rf $tempdir # os.makedirs(tempdir, exist_ok=True) # opt_steps = steps * save_step # for optimization glob_steps = count * steps # saving if glob_steps == frame_step: frame_step = glob_steps // 2 # otherwise no motion outpic = ipy.Output() outpic if method == 'RGB': if resume: img_in = imageio.imread(resumed_bytes) / 255. params_tmp = torch.Tensor(img_in).permute(2,0,1).unsqueeze(0).float().cuda() params_tmp = un_rgb(params_tmp, colors=1.) sideY, sideX = img_in.shape[0], img_in.shape[1] else: params_tmp = torch.randn(1, 3, sideY, sideX).cuda() # * 0.01 else: # FFT if resume: if os.path.splitext(resumed_filename)[1].lower()[1:] in ['jpg','png','tif','bmp']: img_in = imageio.imread(resumed_bytes) params_tmp = img2fft(img_in, 1.5, 1.) * 2. else: params_tmp = torch.load(io.BytesIO(resumed_bytes)) if isinstance(params_tmp, list): params_tmp = params_tmp[0] params_tmp = params_tmp.cuda() sideY, sideX = params_tmp.shape[2], (params_tmp.shape[3]-1)*2 else: params_shape = [1, 3, sideY, sideX//2+1, 2] params_tmp = torch.randn(*params_shape).cuda() * 0.01 params_tmp = params_tmp.detach() # function() = torch.transformation(linear) # animation controls if animate_them: if method == 'RGB': m_scale = latent_anima([1], glob_steps, frame_step, uniform=True, cubic=True, start_lat=[-0.3]) m_scale = 1 + (m_scale + 0.3) * zoom # only zoom in else: m_scale = latent_anima([1], glob_steps, frame_step, uniform=True, cubic=True, start_lat=[0.6]) m_scale = 1 - (m_scale-0.6) * zoom # ping pong m_shift = latent_anima([2], glob_steps, frame_step, uniform=True, cubic=True, start_lat=[0.5,0.5]) m_angle = latent_anima([1], glob_steps, frame_step, uniform=True, cubic=True, start_lat=[0.5]) m_shear = latent_anima([1], glob_steps, frame_step, uniform=True, cubic=True, start_lat=[0.5]) m_shift = (m_shift-0.5) * shift * abs(m_scale-1.) / zoom m_angle = (m_angle-0.5) * rotate * abs(m_scale-1.) / zoom m_shear = (m_shear-0.5) * distort * abs(m_scale-1.) 
/ zoom def get_encs(encs, num): cnt = len(encs) if cnt == 0: return [] enc_1 = encs[min(num, cnt-1)] enc_2 = encs[min(num+1, cnt-1)] return slerp(enc_1, enc_2, steps) def frame_transform(img, size, angle, shift, scale, shear): if old_torch(): # 1.7.1 img = T.functional.affine(img, angle, shift, scale, shear, fillcolor=0, resample=PIL.Image.BILINEAR) img = T.functional.center_crop(img, size) img = pad_up_to(img, size) else: # 1.8+ img = T.functional.affine(img, angle, shift, scale, shear, fill=0, interpolation=T.InterpolationMode.BILINEAR) img = T.functional.center_crop(img, size) # on 1.8+ also pads return img global img_np img_np = None prev_enc = 0 def process(num): global params_tmp, img_np, opt_state, params, image_f, optimizer, pbar if interpolate_topics: txt_encs = get_encs(key_txt_encs, num) styl_encs = get_encs(key_styl_encs, num) else: txt_encs = [key_txt_encs[min(num, len(key_txt_encs)-1)][0]] * steps if len(key_txt_encs) > 0 else [] styl_encs = [key_styl_encs[min(num, len(key_styl_encs)-1)][0]] * steps if len(key_styl_encs) > 0 else [] if len(texts) > 0: print(' ref text: ', texts[min(num, len(texts)-1)][:80]) if len(styles) > 0: print(' ref style: ', styles[min(num, len(styles)-1)][:80]) for ii in range(steps): glob_step = num * steps + ii # saving/transforming ### animation: transform frame, reload params h, w = sideY, sideX # transform frame for motion scale = m_scale[glob_step] if animate_them else 1-zoom trans = tuple(m_shift[glob_step]) if animate_them else [0, shift] angle = m_angle[glob_step][0] if animate_them else rotate shear = m_shear[glob_step][0] if animate_them else distort if method == 'RGB': if DepthStrength > 0: params_tmp = depth_transform(params_tmp, img_np, depth_infer, depth_mask, size, DepthStrength, scale, trans, colors, depthdir, glob_step) params_tmp = frame_transform(params_tmp, (h,w), angle, trans, scale, shear) params, image_f, _ = pixel_image([1,3,h,w], resume=params_tmp) img_tmp = None else: # FFT if old_torch(): # 1.7.1 img_tmp = torch.irfft(params_tmp, 2, normalized=True, signal_sizes=(h,w)) if DepthStrength > 0: img_tmp = depth_transform(img_tmp, img_np, depth_infer, depth_mask, size, DepthStrength, scale, trans, colors, depthdir, glob_step) img_tmp = frame_transform(img_tmp, (h,w), angle, trans, scale, shear) params_tmp = torch.rfft(img_tmp, 2, normalized=True) else: # 1.8+ if type(params_tmp) is not torch.complex64: params_tmp = torch.view_as_complex(params_tmp) img_tmp = torch.fft.irfftn(params_tmp, s=(h,w), norm='ortho') if DepthStrength > 0: img_tmp = depth_transform(img_tmp, img_np, depth_infer, depth_mask, size, DepthStrength, scale, trans, colors, depthdir, glob_step) img_tmp = frame_transform(img_tmp, (h,w), angle, trans, scale, shear) params_tmp = torch.fft.rfftn(img_tmp, s=[h,w], dim=[2,3], norm='ortho') params_tmp = torch.view_as_real(params_tmp) params, image_f, _ = fft_image([1,3,h,w], resume=params_tmp, sd=1.) image_f = to_valid_rgb(image_f, colors=colors) del img_tmp optimizer = torch.optim.Adam(params, learning_rate) # optimizer = torch.optim.AdamW(params, learning_rate, weight_decay=0.01, amsgrad=True) if smooth is True and num + ii > 0: optimizer.load_state_dict(opt_state) # get encoded inputs txt_enc = txt_encs[ii % len(txt_encs)].unsqueeze(0) if len(txt_encs) > 0 else None styl_enc = styl_encs[ii % len(styl_encs)].unsqueeze(0) if len(styl_encs) > 0 else None ### optimization for ss in range(save_step): loss = 0 noise = aug_noise * (torch.rand(1, 1, *params[0].shape[2:4], 1)-0.5).cuda() if aug_noise > 0 else 0. 
img_out = image_f(noise) img_sliced = slice_imgs([img_out], samples, modsize, trform_f, align, macro)[0] out_enc = model_clip.encode_image(img_sliced) if method == 'RGB': # empirical hack loss += 1.5 * abs(img_out.mean((2,3)) - 0.45).mean() # fix brightness loss += 1.5 * abs(img_out.std((2,3)) - 0.17).sum() # fix contrast if txt_enc is not None: loss -= sim_func(txt_enc, out_enc, similarity_function) if styl_enc is not None: loss -= style_power * sim_func(styl_enc, out_enc, similarity_function) if sharpness != 0: # mode = scharr|sobel|naive loss -= sharpness * derivat(img_out, mode='naive') # loss -= sharpness * derivat(img_sliced, mode='scharr') if enforce != 0: img_sliced = slice_imgs([image_f(noise)], samples, modsize, trform_f, align, macro)[0] out_enc2 = model_clip.encode_image(img_sliced) loss -= enforce * sim_func(out_enc, out_enc2, similarity_function) del out_enc2; torch.cuda.empty_cache() if expand > 0: global prev_enc if ii > 0: loss += expand * sim_func(prev_enc, out_enc, similarity_function) prev_enc = out_enc.detach().clone() del img_out, img_sliced, out_enc; torch.cuda.empty_cache() optimizer.zero_grad() loss.backward() optimizer.step() ### save params & frame params_tmp = params[0].detach().clone() if smooth is True: opt_state = optimizer.state_dict() with torch.no_grad(): img_t = image_f(contrast=contrast)[0].permute(1,2,0) img_np = torch.clip(img_t*255, 0, 255).cpu().numpy().astype(np.uint8) imageio.imsave(os.path.join(tempdir, '%05d.jpg' % glob_step), img_np, quality=95) shutil.copy(os.path.join(tempdir, '%05d.jpg' % glob_step), 'result.jpg') outpic.clear_output() with outpic: display(Image('result.jpg')) del img_t pbar.upd() params_tmp = params[0].detach().clone() outpic = ipy.Output() outpic pbar = ProgressBar(glob_steps) for i in range(count): process(i) HTML(makevid(tempdir)) files.download(tempdir + '.mp4') ## deKxi: downloading depth video if save_depth and DepthStrength > 0: HTML(makevid(depthdir)) files.download(depthdir + '.mp4') ``` If video is not auto-downloaded after generation (for whatever reason), run this cell to do that: ``` files.download(tempdir + '.mp4') if save_depth and DepthStrength > 0: files.download(depthdir + '.mp4') ```
github_jupyter
``` # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Vertex client library: Custom training tabular regression model with pipeline for online prediction with training pipeline <table align="left"> <td> <a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb"> <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab </a> </td> <td> <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb"> <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> View on GitHub </a> </td> </table> <br/><br/><br/> ## Overview This tutorial demonstrates how to use the Vertex client library for Python to train and deploy a custom tabular regression model for online prediction, using a training pipeline. ### Dataset The dataset used for this tutorial is the [Boston Housing Prices dataset](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html). The version of the dataset you will use in this tutorial is built into TensorFlow. The trained model predicts the median price of a house in units of 1K USD. ### Objective In this tutorial, you create a custom model from a Python script in a Google prebuilt Docker container using the Vertex client library, and then do a prediction on the deployed model by sending data. You can alternatively create custom models using `gcloud` command-line tool or online using Google Cloud Console. The steps performed include: - Create a Vertex custom job for training a model. - Create a `TrainingPipeline` resource. - Train a TensorFlow model with the `TrainingPipeline` resource. - Retrieve and load the model artifacts. - View the model evaluation. - Upload the model as a Vertex `Model` resource. - Deploy the `Model` resource to a serving `Endpoint` resource. - Make a prediction. - Undeploy the `Model` resource. ### Costs This tutorial uses billable components of Google Cloud (GCP): * Vertex AI * Cloud Storage Learn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage pricing](https://cloud.google.com/storage/pricing), and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage. ## Installation Install the latest version of Vertex client library. ``` import os import sys # Google Cloud Notebook if os.path.exists("/opt/deeplearning/metadata/env_version"): USER_FLAG = "--user" else: USER_FLAG = "" ! pip3 install -U google-cloud-aiplatform $USER_FLAG ``` Install the latest GA version of *google-cloud-storage* library as well. ``` ! 
pip3 install -U google-cloud-storage $USER_FLAG ``` ### Restart the kernel Once you've installed the Vertex client library and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages. ``` if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ``` ## Before you begin ### GPU runtime *Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU** ### Set up your Google Cloud project **The following steps are required, regardless of your notebook environment.** 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs. 2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project) 3. [Enable the Vertex APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component) 4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook. 5. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook. **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. ``` PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_ID ``` #### Region You can also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Below are regions supported for Vertex. We recommend that you choose the region closest to you. - Americas: `us-central1` - Europe: `europe-west4` - Asia Pacific: `asia-east1` You may not use a multi-regional bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see the [Vertex locations documentation](https://cloud.google.com/vertex-ai/docs/general/locations) ``` REGION = "us-central1" # @param {type: "string"} ``` #### Timestamp If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial. ``` from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") ``` ### Authenticate your Google Cloud account **If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step. **If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth. **Otherwise**, follow these steps: In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page. **Click Create service account**. In the **Service account name** field, enter a name, and click **Create**. In the **Grant this service account access to project** section, click the Role drop-down list. 
Type "Vertex" into the filter box, and select **Vertex Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**. Click Create. A JSON file that contains your key downloads to your local environment. Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell. ``` # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. # If on Google Cloud Notebook, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' ``` ### Create a Cloud Storage bucket **The following steps are required, regardless of your notebook environment.** When you submit a custom training job using the Vertex client library, you upload a Python package containing your training code to a Cloud Storage bucket. Vertex runs the code from this package. In this tutorial, Vertex also saves the trained model that results from your job in the same bucket. You can then create an `Endpoint` resource based on this output in order to serve online predictions. Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization. ``` BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP ``` **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket. ``` ! gsutil mb -l $REGION $BUCKET_NAME ``` Finally, validate access to your Cloud Storage bucket by examining its contents: ``` ! gsutil ls -al $BUCKET_NAME ``` ### Set up variables Next, set up some variables used throughout the tutorial. ### Import libraries and define constants #### Import Vertex client library Import the Vertex client library into our Python environment. ``` import time from google.cloud.aiplatform import gapic as aip from google.protobuf import json_format from google.protobuf.json_format import MessageToJson, ParseDict from google.protobuf.struct_pb2 import Struct, Value ``` #### Vertex constants Setup up the following constants for Vertex: - `API_ENDPOINT`: The Vertex API service endpoint for dataset, model, job, pipeline and endpoint services. - `PARENT`: The Vertex location root path for dataset, model, job, pipeline and endpoint resources. ``` # API service endpoint API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION) # Vertex location root path for your dataset, model and endpoint resources PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION ``` #### CustomJob constants Set constants unique to CustomJob training: - Dataset Training Schemas: Tells the `Pipeline` resource service the task (e.g., classification) to train the model for. 
``` CUSTOM_TASK_GCS_PATH = ( "gs://google-cloud-aiplatform/schema/trainingjob/definition/custom_task_1.0.0.yaml" ) ``` #### Hardware Accelerators Set the hardware accelerators (e.g., GPU), if any, for training and prediction. Set the variables `TRAIN_GPU/TRAIN_NGPU` and `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify: (aip.AcceleratorType.NVIDIA_TESLA_K80, 4) For GPU, available accelerators include: - aip.AcceleratorType.NVIDIA_TESLA_K80 - aip.AcceleratorType.NVIDIA_TESLA_P100 - aip.AcceleratorType.NVIDIA_TESLA_P4 - aip.AcceleratorType.NVIDIA_TESLA_T4 - aip.AcceleratorType.NVIDIA_TESLA_V100 Otherwise specify `(None, None)` to use a container image to run on a CPU. *Note*: TF releases before 2.3 for GPU support will fail to load the custom model in this tutorial. It is a known issue and fixed in TF 2.3 -- which is caused by static graph ops that are generated in the serving function. If you encounter this issue on your own custom models, use a container image for TF 2.3 with GPU support. ``` if os.getenv("IS_TESTING_TRAIN_GPU"): TRAIN_GPU, TRAIN_NGPU = ( aip.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv("IS_TESTING_TRAIN_GPU")), ) else: TRAIN_GPU, TRAIN_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1) if os.getenv("IS_TESTING_DEPOLY_GPU"): DEPLOY_GPU, DEPLOY_NGPU = ( aip.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv("IS_TESTING_DEPOLY_GPU")), ) else: DEPLOY_GPU, DEPLOY_NGPU = (None, None) ``` #### Container (Docker) image Next, we will set the Docker container images for training and prediction - TensorFlow 1.15 - `gcr.io/cloud-aiplatform/training/tf-cpu.1-15:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.1-15:latest` - TensorFlow 2.1 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-1:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-1:latest` - TensorFlow 2.2 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-2:latest` - TensorFlow 2.3 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-3:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-3:latest` - TensorFlow 2.4 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-4:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-4:latest` - XGBoost - `gcr.io/cloud-aiplatform/training/xgboost-cpu.1-1` - Scikit-learn - `gcr.io/cloud-aiplatform/training/scikit-learn-cpu.0-23:latest` - Pytorch - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-4:latest` - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-5:latest` - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-6:latest` - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-7:latest` For the latest list, see [Pre-built containers for training](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers). 
- TensorFlow 1.15 - `gcr.io/cloud-aiplatform/prediction/tf-cpu.1-15:latest` - `gcr.io/cloud-aiplatform/prediction/tf-gpu.1-15:latest` - TensorFlow 2.1 - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-1:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-1:latest` - TensorFlow 2.2 - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-2:latest` - TensorFlow 2.3 - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-3:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-3:latest` - XGBoost - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-2:latest` - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-1:latest` - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-90:latest` - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-82:latest` - Scikit-learn - `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-23:latest` - `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-22:latest` - `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-20:latest` For the latest list, see [Pre-built containers for prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers) ``` if os.getenv("IS_TESTING_TF"): TF = os.getenv("IS_TESTING_TF") else: TF = "2-1" if TF[0] == "2": if TRAIN_GPU: TRAIN_VERSION = "tf-gpu.{}".format(TF) else: TRAIN_VERSION = "tf-cpu.{}".format(TF) if DEPLOY_GPU: DEPLOY_VERSION = "tf2-gpu.{}".format(TF) else: DEPLOY_VERSION = "tf2-cpu.{}".format(TF) else: if TRAIN_GPU: TRAIN_VERSION = "tf-gpu.{}".format(TF) else: TRAIN_VERSION = "tf-cpu.{}".format(TF) if DEPLOY_GPU: DEPLOY_VERSION = "tf-gpu.{}".format(TF) else: DEPLOY_VERSION = "tf-cpu.{}".format(TF) TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION) DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION) print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU) print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU) ``` #### Machine Type Next, set the machine type to use for training and prediction. - Set the variables `TRAIN_COMPUTE` and `DEPLOY_COMPUTE` to configure the compute resources for the VMs you will use for for training and prediction. - `machine type` - `n1-standard`: 3.75GB of memory per vCPU. - `n1-highmem`: 6.5GB of memory per vCPU - `n1-highcpu`: 0.9 GB of memory per vCPU - `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \] *Note: The following is not supported for training:* - `standard`: 2 vCPUs - `highcpu`: 2, 4 and 8 vCPUs *Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*. ``` if os.getenv("IS_TESTING_TRAIN_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE") else: MACHINE_TYPE = "n1-standard" VCPU = "4" TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Train machine type", TRAIN_COMPUTE) if os.getenv("IS_TESTING_DEPLOY_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE") else: MACHINE_TYPE = "n1-standard" VCPU = "4" DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Deploy machine type", DEPLOY_COMPUTE) ``` # Tutorial Now you are ready to start creating your own custom model and training for Boston Housing. ## Set up clients The Vertex client library works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex server. You will use different clients in this tutorial for different steps in the workflow. So set them all up upfront. - Model Service for `Model` resources. - Pipeline Service for training. - Endpoint Service for deployment. 
- Job Service for batch jobs and custom training. - Prediction Service for serving. ``` # client options same for all services client_options = {"api_endpoint": API_ENDPOINT} def create_model_client(): client = aip.ModelServiceClient(client_options=client_options) return client def create_pipeline_client(): client = aip.PipelineServiceClient(client_options=client_options) return client def create_endpoint_client(): client = aip.EndpointServiceClient(client_options=client_options) return client def create_prediction_client(): client = aip.PredictionServiceClient(client_options=client_options) return client clients = {} clients["model"] = create_model_client() clients["pipeline"] = create_pipeline_client() clients["endpoint"] = create_endpoint_client() clients["prediction"] = create_prediction_client() for client in clients.items(): print(client) ``` ## Train a model There are two ways you can train a custom model using a container image: - **Use a Google Cloud prebuilt container**. If you use a prebuilt container, you will additionally specify a Python package to install into the container image. This Python package contains your code for training a custom model. - **Use your own custom container image**. If you use your own container, the container needs to contain your code for training a custom model. ## Prepare your custom job specification Now that your clients are ready, your first step is to create a Job Specification for your custom training job. The job specification will consist of the following: - `worker_pool_spec` : The specification of the type of machine(s) you will use for training and how many (single or distributed) - `python_package_spec` : The specification of the Python package to be installed with the pre-built container. ### Prepare your machine specification Now define the machine specification for your custom training job. This tells Vertex what type of machine instance to provision for the training. - `machine_type`: The type of GCP instance to provision -- e.g., n1-standard-8. - `accelerator_type`: The type, if any, of hardware accelerator. In this tutorial if you previously set the variable `TRAIN_GPU != None`, you are using a GPU; otherwise you will use a CPU. - `accelerator_count`: The number of accelerators. ``` if TRAIN_GPU: machine_spec = { "machine_type": TRAIN_COMPUTE, "accelerator_type": TRAIN_GPU, "accelerator_count": TRAIN_NGPU, } else: machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0} ``` ### Prepare your disk specification (optional) Now define the disk specification for your custom training job. This tells Vertex what type and size of disk to provision in each machine instance for the training. - `boot_disk_type`: Either SSD or Standard. SSD is faster, and Standard is less expensive. Defaults to SSD. - `boot_disk_size_gb`: Size of disk in GB. ``` DISK_TYPE = "pd-ssd" # [ pd-ssd, pd-standard] DISK_SIZE = 200 # GB disk_spec = {"boot_disk_type": DISK_TYPE, "boot_disk_size_gb": DISK_SIZE} ``` ### Define the worker pool specification Next, you define the worker pool specification for your custom training job. The worker pool specification will consist of the following: - `replica_count`: The number of instances to provision of this machine type. - `machine_spec`: The hardware specification. - `disk_spec` : (optional) The disk storage specification. - `python_package`: The Python training package to install on the VM instance(s) and which Python module to invoke, along with command line arguments for the Python module. 
Let's dive deeper now into the python package specification: -`executor_image_spec`: This is the docker image which is configured for your custom training job. -`package_uris`: This is a list of the locations (URIs) of your python training packages to install on the provisioned instance. The locations need to be in a Cloud Storage bucket. These can be either individual python files or a zip (archive) of an entire package. In the later case, the job service will unzip (unarchive) the contents into the docker image. -`python_module`: The Python module (script) to invoke for running the custom training job. In this example, you will be invoking `trainer.task.py` -- note that it was not neccessary to append the `.py` suffix. -`args`: The command line arguments to pass to the corresponding Pythom module. In this example, you will be setting: - `"--model-dir=" + MODEL_DIR` : The Cloud Storage location where to store the model artifacts. There are two ways to tell the training script where to save the model artifacts: - direct: You pass the Cloud Storage location as a command line argument to your training script (set variable `DIRECT = True`), or - indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`). In this case, you tell the service the model artifact location in the job specification. - `"--epochs=" + EPOCHS`: The number of epochs for training. - `"--steps=" + STEPS`: The number of steps (batches) per epoch. - `"--distribute=" + TRAIN_STRATEGY"` : The training distribution strategy to use for single or distributed training. - `"single"`: single device. - `"mirror"`: all GPU devices on a single compute instance. - `"multi"`: all GPU devices on all compute instances. - `"--param-file=" + PARAM_FILE`: The Cloud Storage location for storing feature normalization values. ``` JOB_NAME = "custom_job_" + TIMESTAMP MODEL_DIR = "{}/{}".format(BUCKET_NAME, JOB_NAME) if not TRAIN_NGPU or TRAIN_NGPU < 2: TRAIN_STRATEGY = "single" else: TRAIN_STRATEGY = "mirror" EPOCHS = 20 STEPS = 100 PARAM_FILE = BUCKET_NAME + "/params.txt" DIRECT = True if DIRECT: CMDARGS = [ "--model-dir=" + MODEL_DIR, "--epochs=" + str(EPOCHS), "--steps=" + str(STEPS), "--distribute=" + TRAIN_STRATEGY, "--param-file=" + PARAM_FILE, ] else: CMDARGS = [ "--epochs=" + str(EPOCHS), "--steps=" + str(STEPS), "--distribute=" + TRAIN_STRATEGY, "--param-file=" + PARAM_FILE, ] worker_pool_spec = [ { "replica_count": 1, "machine_spec": machine_spec, "disk_spec": disk_spec, "python_package_spec": { "executor_image_uri": TRAIN_IMAGE, "package_uris": [BUCKET_NAME + "/trainer_boston.tar.gz"], "python_module": "trainer.task", "args": CMDARGS, }, } ] ``` ### Examine the training package #### Package layout Before you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout. - PKG-INFO - README.md - setup.cfg - setup.py - trainer - \_\_init\_\_.py - task.py The files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image. The file `trainer/task.py` is the Python script for executing the custom training job. *Note*, when we referred to it in the worker pool specification, we replace the directory slash with a dot (`trainer.task`) and dropped the file suffix (`.py`). #### Package Assembly In the following cells, you will assemble the training package. 
``` # Make folder for Python training script ! rm -rf custom ! mkdir custom # Add package information ! touch custom/README.md setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0" ! echo "$setup_cfg" > custom/setup.cfg setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())" ! echo "$setup_py" > custom/setup.py pkg_info = "Metadata-Version: 1.0\n\nName: Boston Housing tabular regression\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: aferlitsch@google.com\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex" ! echo "$pkg_info" > custom/PKG-INFO # Make the training subfolder ! mkdir custom/trainer ! touch custom/trainer/__init__.py ``` #### Task.py contents In the next cell, you write the contents of the training script task.py. I won't go into detail, it's just there for you to browse. In summary: - Get the directory where to save the model artifacts from the command line (`--model_dir`), and if not specified, then from the environment variable `AIP_MODEL_DIR`. - Loads Boston Housing dataset from TF.Keras builtin datasets - Builds a simple deep neural network model using TF.Keras model API. - Compiles the model (`compile()`). - Sets a training distribution strategy according to the argument `args.distribute`. - Trains the model (`fit()`) with epochs specified by `args.epochs`. - Saves the trained model (`save(args.model_dir)`) to the specified model directory. - Saves the maximum value for each feature `f.write(str(params))` to the specified parameters file. ``` %%writefile custom/trainer/task.py # Single, Mirror and Multi-Machine Distributed Training for Boston Housing import tensorflow_datasets as tfds import tensorflow as tf from tensorflow.python.client import device_lib import numpy as np import argparse import os import sys tfds.disable_progress_bar() parser = argparse.ArgumentParser() parser.add_argument('--model-dir', dest='model_dir', default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.') parser.add_argument('--lr', dest='lr', default=0.001, type=float, help='Learning rate.') parser.add_argument('--epochs', dest='epochs', default=20, type=int, help='Number of epochs.') parser.add_argument('--steps', dest='steps', default=100, type=int, help='Number of steps per epoch.') parser.add_argument('--distribute', dest='distribute', type=str, default='single', help='distributed training strategy') parser.add_argument('--param-file', dest='param_file', default='/tmp/param.txt', type=str, help='Output file for parameters') args = parser.parse_args() print('Python Version = {}'.format(sys.version)) print('TensorFlow Version = {}'.format(tf.__version__)) print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found'))) # Single Machine, single compute device if args.distribute == 'single': if tf.test.is_gpu_available(): strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0") else: strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0") # Single Machine, multiple compute device elif args.distribute == 'mirror': strategy = tf.distribute.MirroredStrategy() # Multiple Machine, multiple compute device elif args.distribute == 'multi': strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() # Multi-worker configuration print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync)) def make_dataset(): # Scaling Boston Housing data features def scale(feature): max = 
np.max(feature) feature = (feature / max).astype(np.float) return feature, max (x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data( path="boston_housing.npz", test_split=0.2, seed=113 ) params = [] for _ in range(13): x_train[_], max = scale(x_train[_]) x_test[_], _ = scale(x_test[_]) params.append(max) # store the normalization (max) value for each feature with tf.io.gfile.GFile(args.param_file, 'w') as f: f.write(str(params)) return (x_train, y_train), (x_test, y_test) # Build the Keras model def build_and_compile_dnn_model(): model = tf.keras.Sequential([ tf.keras.layers.Dense(128, activation='relu', input_shape=(13,)), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(1, activation='linear') ]) model.compile( loss='mse', optimizer=tf.keras.optimizers.RMSprop(learning_rate=args.lr)) return model NUM_WORKERS = strategy.num_replicas_in_sync # Here the batch size scales up by number of workers since # `tf.data.Dataset.batch` expects the global batch size. BATCH_SIZE = 16 GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS with strategy.scope(): # Creation of dataset, and model building/compiling need to be within # `strategy.scope()`. model = build_and_compile_dnn_model() # Train the model (x_train, y_train), (x_test, y_test) = make_dataset() model.fit(x_train, y_train, epochs=args.epochs, batch_size=GLOBAL_BATCH_SIZE) model.save(args.model_dir) ``` #### Store training script on your Cloud Storage bucket Next, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket. ``` ! rm -f custom.tar custom.tar.gz ! tar cvf custom.tar custom ! gzip custom.tar ! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_boston.tar.gz ``` ## Train the model using a `TrainingPipeline` resource Now start training of your custom training job using a training pipeline on Vertex. To train the your custom model, do the following steps: 1. Create a Vertex `TrainingPipeline` resource for the `Dataset` resource. 2. Execute the pipeline to start the training. ### Create a `TrainingPipeline` resource You may ask, what do we use a pipeline for? We typically use pipelines when the job (such as training) has multiple steps, generally in sequential order: do step A, do step B, etc. By putting the steps into a pipeline, we gain the benefits of: 1. Being reusable for subsequent training jobs. 2. Can be containerized and ran as a batch job. 3. Can be distributed. 4. All the steps are associated with the same pipeline job for tracking progress. #### The `training_pipeline` specification First, you need to describe a pipeline specification. Let's look into the *minimal* requirements for constructing a `training_pipeline` specification for a custom job: - `display_name`: A human readable name for the pipeline job. - `training_task_definition`: The training task schema. - `training_task_inputs`: A dictionary describing the requirements for the training job. - `model_to_upload`: A dictionary describing the specification for the (uploaded) Vertex custom `Model` resource. - `display_name`: A human readable name for the `Model` resource. - `artificat_uri`: The Cloud Storage path where the model artifacts are stored in SavedModel format. - `container_spec`: This is the specification for the Docker container that will be installed on the `Endpoint` resource, from which the custom model will serve predictions. 
``` from google.protobuf import json_format from google.protobuf.struct_pb2 import Value MODEL_NAME = "custom_pipeline-" + TIMESTAMP PIPELINE_DISPLAY_NAME = "custom-training-pipeline" + TIMESTAMP training_task_inputs = json_format.ParseDict( {"workerPoolSpecs": worker_pool_spec}, Value() ) pipeline = { "display_name": PIPELINE_DISPLAY_NAME, "training_task_definition": CUSTOM_TASK_GCS_PATH, "training_task_inputs": training_task_inputs, "model_to_upload": { "display_name": PIPELINE_DISPLAY_NAME + "-model", "artifact_uri": MODEL_DIR, "container_spec": {"image_uri": DEPLOY_IMAGE}, }, } print(pipeline) ``` #### Create the training pipeline Use this helper function `create_pipeline`, which takes the following parameter: - `training_pipeline`: the full specification for the pipeline training job. The helper function calls the pipeline client service's `create_pipeline` method, which takes the following parameters: - `parent`: The Vertex location root path for your `Dataset`, `Model` and `Endpoint` resources. - `training_pipeline`: The full specification for the pipeline training job. The helper function will return the Vertex fully qualified identifier assigned to the training pipeline, which is saved as `pipeline.name`. ``` def create_pipeline(training_pipeline): try: pipeline = clients["pipeline"].create_training_pipeline( parent=PARENT, training_pipeline=training_pipeline ) print(pipeline) except Exception as e: print("exception:", e) return None return pipeline response = create_pipeline(pipeline) ``` Now save the unique identifier of the training pipeline you created. ``` # The full unique ID for the pipeline pipeline_id = response.name # The short numeric ID for the pipeline pipeline_short_id = pipeline_id.split("/")[-1] print(pipeline_id) ``` ### Get information on a training pipeline Now get pipeline information for just this training pipeline instance. The helper function gets the job information for just this job by calling the the job client service's `get_training_pipeline` method, with the following parameter: - `name`: The Vertex fully qualified pipeline identifier. When the model is done training, the pipeline state will be `PIPELINE_STATE_SUCCEEDED`. ``` def get_training_pipeline(name, silent=False): response = clients["pipeline"].get_training_pipeline(name=name) if silent: return response print("pipeline") print(" name:", response.name) print(" display_name:", response.display_name) print(" state:", response.state) print(" training_task_definition:", response.training_task_definition) print(" training_task_inputs:", dict(response.training_task_inputs)) print(" create_time:", response.create_time) print(" start_time:", response.start_time) print(" end_time:", response.end_time) print(" update_time:", response.update_time) print(" labels:", dict(response.labels)) return response response = get_training_pipeline(pipeline_id) ``` # Deployment Training the above model may take upwards of 20 minutes time. Once your model is done training, you can calculate the actual time it took to train the model by subtracting `end_time` from `start_time`. For your model, you will need to know the fully qualified Vertex Model resource identifier, which the pipeline service assigned to it. You can get this from the returned pipeline instance as the field `model_to_deploy.name`. 
``` while True: response = get_training_pipeline(pipeline_id, True) if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED: print("Training job has not completed:", response.state) model_to_deploy_id = None if response.state == aip.PipelineState.PIPELINE_STATE_FAILED: raise Exception("Training Job Failed") else: model_to_deploy = response.model_to_upload model_to_deploy_id = model_to_deploy.name print("Training Time:", response.end_time - response.start_time) break time.sleep(60) print("model to deploy:", model_to_deploy_id) if not DIRECT: MODEL_DIR = MODEL_DIR + "/model" model_path_to_deploy = MODEL_DIR ``` ## Load the saved model Your model is stored in a TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can do some things, like evaluate the model, and do a prediction. To load, you use the TF.Keras `model.load_model()` method passing it the Cloud Storage path where the model is saved -- specified by `MODEL_DIR`. ``` import tensorflow as tf model = tf.keras.models.load_model(MODEL_DIR) ``` ## Evaluate the model Now let's find out how good the model is. ### Load evaluation data You will load the Boston Housing test (holdout) data from `tf.keras.datasets`, using the method `load_data()`. This will return the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements: the feature data, and the corresponding labels (median value of owner-occupied home). You don't need the training data, and hence why we loaded it as `(_, _)`. Before you can run the data through evaluation, you need to preprocess it: x_test: 1. Normalize (rescaling) the data in each column by dividing each value by the maximum value of that column. This will replace each single value with a 32-bit floating point number between 0 and 1. ``` import numpy as np from tensorflow.keras.datasets import boston_housing (_, _), (x_test, y_test) = boston_housing.load_data( path="boston_housing.npz", test_split=0.2, seed=113 ) def scale(feature): max = np.max(feature) feature = (feature / max).astype(np.float32) return feature # Let's save one data item that has not been scaled x_test_notscaled = x_test[0:1].copy() for _ in range(13): x_test[_] = scale(x_test[_]) x_test = x_test.astype(np.float32) print(x_test.shape, x_test.dtype, y_test.shape) print("scaled", x_test[0]) print("unscaled", x_test_notscaled) ``` ### Perform the model evaluation Now evaluate how well the model in the custom job did. ``` model.evaluate(x_test, y_test) ``` ## Upload the model for serving Next, you will upload your TF.Keras model from the custom job to Vertex `Model` service, which will create a Vertex `Model` resource for your custom model. During upload, you need to define a serving function to convert data to the format your model expects. If you send encoded data to Vertex, your serving function ensures that the data is decoded on the model server before it is passed as input to your model. ### How does the serving function work When you send a request to an online prediction server, the request is received by a HTTP server. The HTTP server extracts the prediction request from the HTTP request content body. The extracted prediction request is forwarded to the serving function. For Google pre-built prediction containers, the request content is passed to the serving function as a `tf.string`. 
The serving function consists of two parts: - `preprocessing function`: - Converts the input (`tf.string`) to the input shape and data type of the underlying model (dynamic graph). - Performs the same preprocessing of the data that was done during training the underlying model -- e.g., normalizing, scaling, etc. - `post-processing function`: - Converts the model output to format expected by the receiving application -- e.q., compresses the output. - Packages the output for the the receiving application -- e.g., add headings, make JSON object, etc. Both the preprocessing and post-processing functions are converted to static graphs which are fused to the model. The output from the underlying model is passed to the post-processing function. The post-processing function passes the converted/packaged output back to the HTTP server. The HTTP server returns the output as the HTTP response content. One consideration you need to consider when building serving functions for TF.Keras models is that they run as static graphs. That means, you cannot use TF graph operations that require a dynamic graph. If you do, you will get an error during the compile of the serving function which will indicate that you are using an EagerTensor which is not supported. ## Get the serving function signature You can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer. When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request. ``` loaded = tf.saved_model.load(model_path_to_deploy) serving_input = list( loaded.signatures["serving_default"].structured_input_signature[1].keys() )[0] print("Serving function input:", serving_input) ``` ### Upload the model Use this helper function `upload_model` to upload your model, stored in SavedModel format, up to the `Model` service, which will instantiate a Vertex `Model` resource instance for your model. Once you've done that, you can use the `Model` resource instance in the same way as any other Vertex `Model` resource instance, such as deploying to an `Endpoint` resource for serving predictions. The helper function takes the following parameters: - `display_name`: A human readable name for the `Endpoint` service. - `image_uri`: The container image for the model deployment. - `model_uri`: The Cloud Storage path to our SavedModel artificat. For this tutorial, this is the Cloud Storage location where the `trainer/task.py` saved the model artifacts, which we specified in the variable `MODEL_DIR`. The helper function calls the `Model` client service's method `upload_model`, which takes the following parameters: - `parent`: The Vertex location root path for `Dataset`, `Model` and `Endpoint` resources. - `model`: The specification for the Vertex `Model` resource instance. Let's now dive deeper into the Vertex model specification `model`. This is a dictionary object that consists of the following fields: - `display_name`: A human readable name for the `Model` resource. - `metadata_schema_uri`: Since your model was built without an Vertex `Dataset` resource, you will leave this blank (`''`). - `artificat_uri`: The Cloud Storage path where the model is stored in SavedModel format. 
- `container_spec`: This is the specification for the Docker container that will be installed on the `Endpoint` resource, from which the `Model` resource will serve predictions. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated. Uploading a model into a Vertex Model resource returns a long running operation, since it may take a few moments. You call response.result(), which is a synchronous call and will return when the Vertex Model resource is ready. The helper function returns the Vertex fully qualified identifier for the corresponding Vertex Model instance upload_model_response.model. You will save the identifier for subsequent steps in the variable model_to_deploy_id. ``` IMAGE_URI = DEPLOY_IMAGE def upload_model(display_name, image_uri, model_uri): model = { "display_name": display_name, "metadata_schema_uri": "", "artifact_uri": model_uri, "container_spec": { "image_uri": image_uri, "command": [], "args": [], "env": [{"name": "env_name", "value": "env_value"}], "ports": [{"container_port": 8080}], "predict_route": "", "health_route": "", }, } response = clients["model"].upload_model(parent=PARENT, model=model) print("Long running operation:", response.operation.name) upload_model_response = response.result(timeout=180) print("upload_model_response") print(" model:", upload_model_response.model) return upload_model_response.model model_to_deploy_id = upload_model( "boston-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy ) ``` ### Get `Model` resource information Now let's get the model information for just your model. Use this helper function `get_model`, with the following parameter: - `name`: The Vertex unique identifier for the `Model` resource. This helper function calls the Vertex `Model` client service's method `get_model`, with the following parameter: - `name`: The Vertex unique identifier for the `Model` resource. ``` def get_model(name): response = clients["model"].get_model(name=name) print(response) get_model(model_to_deploy_id) ``` ## Deploy the `Model` resource Now deploy the trained Vertex custom `Model` resource. This requires two steps: 1. Create an `Endpoint` resource for deploying the `Model` resource to. 2. Deploy the `Model` resource to the `Endpoint` resource. ### Create an `Endpoint` resource Use this helper function `create_endpoint` to create an endpoint to deploy the model to for serving predictions, with the following parameter: - `display_name`: A human readable name for the `Endpoint` resource. The helper function uses the endpoint client service's `create_endpoint` method, which takes the following parameter: - `display_name`: A human readable name for the `Endpoint` resource. Creating an `Endpoint` resource returns a long running operation, since it may take a few moments to provision the `Endpoint` resource for serving. You call `response.result()`, which is a synchronous call and will return when the Endpoint resource is ready. The helper function returns the Vertex fully qualified identifier for the `Endpoint` resource: `response.name`. 
``` ENDPOINT_NAME = "boston_endpoint-" + TIMESTAMP def create_endpoint(display_name): endpoint = {"display_name": display_name} response = clients["endpoint"].create_endpoint(parent=PARENT, endpoint=endpoint) print("Long running operation:", response.operation.name) result = response.result(timeout=300) print("result") print(" name:", result.name) print(" display_name:", result.display_name) print(" description:", result.description) print(" labels:", result.labels) print(" create_time:", result.create_time) print(" update_time:", result.update_time) return result result = create_endpoint(ENDPOINT_NAME) ``` Now get the unique identifier for the `Endpoint` resource you created. ``` # The full unique ID for the endpoint endpoint_id = result.name # The short numeric ID for the endpoint endpoint_short_id = endpoint_id.split("/")[-1] print(endpoint_id) ``` ### Compute instance scaling You have several choices on scaling the compute instances for handling your online prediction requests: - Single Instance: The online prediction requests are processed on a single compute instance. - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to one. - Manual Scaling: The online prediction requests are split across a fixed number of compute instances that you manually specified. - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and online prediction requests are evenly distributed across them. - Auto Scaling: The online prediction requests are split across a scaleable number of compute instances. - Set the minimum (`MIN_NODES`) number of compute instances to provision when a model is first deployed and to de-provision, and set the maximum (`MAX_NODES) number of compute instances to provision, depending on load conditions. The minimum number of compute instances corresponds to the field `min_replica_count` and the maximum number of compute instances corresponds to the field `max_replica_count`, in your subsequent deployment request. ``` MIN_NODES = 1 MAX_NODES = 1 ``` ### Deploy `Model` resource to the `Endpoint` resource Use this helper function `deploy_model` to deploy the `Model` resource to the `Endpoint` resource you created for serving predictions, with the following parameters: - `model`: The Vertex fully qualified model identifier of the model to upload (deploy) from the training pipeline. - `deploy_model_display_name`: A human readable name for the deployed model. - `endpoint`: The Vertex fully qualified endpoint identifier to deploy the model to. The helper function calls the `Endpoint` client service's method `deploy_model`, which takes the following parameters: - `endpoint`: The Vertex fully qualified `Endpoint` resource identifier to deploy the `Model` resource to. - `deployed_model`: The requirements specification for deploying the model. - `traffic_split`: Percent of traffic at the endpoint that goes to this model, which is specified as a dictionary of one or more key/value pairs. - If only one model, then specify as **{ "0": 100 }**, where "0" refers to this model being uploaded and 100 means 100% of the traffic. - If there are existing models on the endpoint, for which the traffic will be split, then use `model_id` to specify as **{ "0": percent, model_id: percent, ... }**, where `model_id` is the model id of an existing model to the deployed endpoint. The percents must add up to 100. 
Let's now dive deeper into the `deployed_model` parameter. This parameter is specified as a Python dictionary with the minimum required fields: - `model`: The Vertex fully qualified model identifier of the (upload) model to deploy. - `display_name`: A human readable name for the deployed model. - `disable_container_logging`: This disables logging of container events, such as execution failures (default is container logging is enabled). Container logging is typically enabled when debugging the deployment and then disabled when deployed for production. - `dedicated_resources`: This refers to how many compute instances (replicas) that are scaled for serving prediction requests. - `machine_spec`: The compute instance to provision. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated. - `min_replica_count`: The number of compute instances to initially provision, which you set earlier as the variable `MIN_NODES`. - `max_replica_count`: The maximum number of compute instances to scale to, which you set earlier as the variable `MAX_NODES`. #### Traffic Split Let's now dive deeper into the `traffic_split` parameter. This parameter is specified as a Python dictionary. This might at first be a tad bit confusing. Let me explain, you can deploy more than one instance of your model to an endpoint, and then set how much (percent) goes to each instance. Why would you do that? Perhaps you already have a previous version deployed in production -- let's call that v1. You got better model evaluation on v2, but you don't know for certain that it is really better until you deploy to production. So in the case of traffic split, you might want to deploy v2 to the same endpoint as v1, but it only get's say 10% of the traffic. That way, you can monitor how well it does without disrupting the majority of users -- until you make a final decision. #### Response The method returns a long running operation `response`. We will wait sychronously for the operation to complete by calling the `response.result()`, which will block until the model is deployed. If this is the first time a model is deployed to the endpoint, it may take a few additional minutes to complete provisioning of resources. ``` DEPLOYED_NAME = "boston_deployed-" + TIMESTAMP def deploy_model( model, deployed_model_display_name, endpoint, traffic_split={"0": 100} ): if DEPLOY_GPU: machine_spec = { "machine_type": DEPLOY_COMPUTE, "accelerator_type": DEPLOY_GPU, "accelerator_count": DEPLOY_NGPU, } else: machine_spec = { "machine_type": DEPLOY_COMPUTE, "accelerator_count": 0, } deployed_model = { "model": model, "display_name": deployed_model_display_name, "dedicated_resources": { "min_replica_count": MIN_NODES, "max_replica_count": MAX_NODES, "machine_spec": machine_spec, }, "disable_container_logging": False, } response = clients["endpoint"].deploy_model( endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split ) print("Long running operation:", response.operation.name) result = response.result() print("result") deployed_model = result.deployed_model print(" deployed_model") print(" id:", deployed_model.id) print(" model:", deployed_model.model) print(" display_name:", deployed_model.display_name) print(" create_time:", deployed_model.create_time) return deployed_model.id deployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id) ``` ## Make a online prediction request Now do a online prediction to your deployed model. 
### Get test item You will use an example out of the test (holdout) portion of the dataset as a test item. ``` test_item = x_test[0] test_label = y_test[0] print(test_item.shape) ``` ### Send the prediction request Ok, now you have a test data item. Use this helper function `predict_data`, which takes the parameters: - `data`: The test data item as a numpy 1D array of floating point values. - `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` resource was deployed. - `parameters_dict`: Additional parameters for serving. This function uses the prediction client service and calls the `predict` method with the parameters: - `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` resource was deployed. - `instances`: A list of instances (data items) to predict. - `parameters`: Additional parameters for serving. To pass the test data to the prediction service, you package it for transmission to the serving binary as follows: 1. Convert the data item from a 1D numpy array to a 1D Python list. 2. Convert the prediction request to a serialized Google protobuf (`json_format.ParseDict()`) Each instance in the prediction request is a dictionary entry of the form: {input_name: content} - `input_name`: the name of the input layer of the underlying model. - `content`: The data item as a 1D Python list. Since the `predict()` service can take multiple data items (instances), you will send your single data item as a list of one data item. As a final step, you package the instances list into Google's protobuf format -- which is what we pass to the `predict()` service. The `response` object returns a list, where each element in the list corresponds to the corresponding image in the request. You will see in the output for each prediction: - `predictions` -- the predicated median value of a house in units of 1K USD. ``` def predict_data(data, endpoint, parameters_dict): parameters = json_format.ParseDict(parameters_dict, Value()) # The format of each instance should conform to the deployed model's prediction input schema. instances_list = [{serving_input: data.tolist()}] instances = [json_format.ParseDict(s, Value()) for s in instances_list] response = clients["prediction"].predict( endpoint=endpoint, instances=instances, parameters=parameters ) print("response") print(" deployed_model_id:", response.deployed_model_id) predictions = response.predictions print("predictions") for prediction in predictions: print(" prediction:", prediction) predict_data(test_item, endpoint_id, None) ``` ## Undeploy the `Model` resource Now undeploy your `Model` resource from the serving `Endpoint` resoure. Use this helper function `undeploy_model`, which takes the following parameters: - `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed to. - `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` is deployed to. This function calls the endpoint client service's method `undeploy_model`, with the following parameters: - `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed. - `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` resource is deployed. - `traffic_split`: How to split traffic among the remaining deployed models on the `Endpoint` resource. 
Since this is the only deployed model on the `Endpoint` resource, you simply can leave `traffic_split` empty by setting it to {}. ``` def undeploy_model(deployed_model_id, endpoint): response = clients["endpoint"].undeploy_model( endpoint=endpoint, deployed_model_id=deployed_model_id, traffic_split={} ) print(response) undeploy_model(deployed_model_id, endpoint_id) ``` # Cleaning up To clean up all GCP resources used in this project, you can [delete the GCP project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: - Dataset - Pipeline - Model - Endpoint - Batch Job - Custom Job - Hyperparameter Tuning Job - Cloud Storage Bucket ``` delete_dataset = True delete_pipeline = True delete_model = True delete_endpoint = True delete_batchjob = True delete_customjob = True delete_hptjob = True delete_bucket = True # Delete the dataset using the Vertex fully qualified identifier for the dataset try: if delete_dataset and "dataset_id" in globals(): clients["dataset"].delete_dataset(name=dataset_id) except Exception as e: print(e) # Delete the training pipeline using the Vertex fully qualified identifier for the pipeline try: if delete_pipeline and "pipeline_id" in globals(): clients["pipeline"].delete_training_pipeline(name=pipeline_id) except Exception as e: print(e) # Delete the model using the Vertex fully qualified identifier for the model try: if delete_model and "model_to_deploy_id" in globals(): clients["model"].delete_model(name=model_to_deploy_id) except Exception as e: print(e) # Delete the endpoint using the Vertex fully qualified identifier for the endpoint try: if delete_endpoint and "endpoint_id" in globals(): clients["endpoint"].delete_endpoint(name=endpoint_id) except Exception as e: print(e) # Delete the batch job using the Vertex fully qualified identifier for the batch job try: if delete_batchjob and "batch_job_id" in globals(): clients["job"].delete_batch_prediction_job(name=batch_job_id) except Exception as e: print(e) # Delete the custom job using the Vertex fully qualified identifier for the custom job try: if delete_customjob and "job_id" in globals(): clients["job"].delete_custom_job(name=job_id) except Exception as e: print(e) # Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job try: if delete_hptjob and "hpt_job_id" in globals(): clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id) except Exception as e: print(e) if delete_bucket and "BUCKET_NAME" in globals(): ! gsutil rm -r $BUCKET_NAME ```
<img src="http://akhavanpour.ir/notebook/images/srttu.gif" alt="SRTTU" style="width: 150px;"/> [![Azure Notebooks](https://notebooks.azure.com/launch.png)](https://notebooks.azure.com/import/gh/Alireza-Akhavan/class.vision) # <div style="direction:rtl;text-align:right;font-family:B Lotus, B Nazanin, Tahoma"> تولید متن با شبکه بازگشتی LSTM در Keras</div> <div style="direction:rtl;text-align:right;font-family:Tahoma"> کدها برگرفته از فصل هشتم کتاب </div> [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff) <div style="direction:rtl;text-align:right;font-family:Tahoma"> و گیت هاب نویسنده کتاب و توسعه دهنده کراس </div> [François Chollet](http://nbviewer.jupyter.org/github/fchollet/deep-learning-with-python-notebooks/blob/master/8.1-text-generation-with-lstm.ipynb) <div style="direction:rtl;text-align:right;font-family:Tahoma"> است. </div> ``` import keras keras.__version__ ``` # Text generation with LSTM ## Implementing character-level LSTM text generation Let's put these ideas in practice in a Keras implementation. The first thing we need is a lot of text data that we can use to learn a language model. You could use any sufficiently large text file or set of text files -- Wikipedia, the Lord of the Rings, etc. In this example we will use some of the writings of Nietzsche, the late-19th century German philosopher (translated to English). The language model we will learn will thus be specifically a model of Nietzsche's writing style and topics of choice, rather than a more generic model of the English language. ### <div style="direction:rtl;text-align:right;font-family:B Lotus, B Nazanin, Tahoma"> مجموعه داده </div> ``` import keras import numpy as np path = keras.utils.get_file( 'nietzsche.txt', origin='https://s3.amazonaws.com/text-datasets/nietzsche.txt') text = open(path).read().lower() print('Corpus length:', len(text)) ``` Next, we will extract partially-overlapping sequences of length `maxlen`, one-hot encode them and pack them in a 3D Numpy array `x` of shape `(sequences, maxlen, unique_characters)`. Simultaneously, we prepare a array `y` containing the corresponding targets: the one-hot encoded characters that come right after each extracted sequence. ``` # Length of extracted character sequences maxlen = 60 # We sample a new sequence every `step` characters step = 3 # This holds our extracted sequences sentences = [] # This holds the targets (the follow-up characters) next_chars = [] for i in range(0, len(text) - maxlen, step): sentences.append(text[i: i + maxlen]) next_chars.append(text[i + maxlen]) print('Number of sequences:', len(sentences)) # List of unique characters in the corpus chars = sorted(list(set(text))) print('Unique characters:', len(chars)) # Dictionary mapping unique characters to their index in `chars` char_indices = dict((char, chars.index(char)) for char in chars) # Next, one-hot encode the characters into binary arrays. print('Vectorization...') x = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool) y = np.zeros((len(sentences), len(chars)), dtype=np.bool) for i, sentence in enumerate(sentences): for t, char in enumerate(sentence): x[i, t, char_indices[char]] = 1 y[i, char_indices[next_chars[i]]] = 1 ``` ## <div style="direction:rtl;text-align:right;font-family:B Lotus, B Nazanin, Tahoma"> ایجاد شبکه (Building the network)</div> Our network is a single `LSTM` layer followed by a `Dense` classifier and softmax over all possible characters. 
But let us note that recurrent neural networks are not the only way to do sequence data generation; 1D convnets also have proven extremely successful at it in recent times. ``` from keras import layers model = keras.models.Sequential() model.add(layers.LSTM(128, input_shape=(maxlen, len(chars)))) model.add(layers.Dense(len(chars), activation='softmax')) ``` Since our targets are one-hot encoded, we will use `categorical_crossentropy` as the loss to train the model: ``` optimizer = keras.optimizers.RMSprop(lr=0.01) model.compile(loss='categorical_crossentropy', optimizer=optimizer) ``` ## Training the language model and sampling from it Given a trained model and a seed text snippet, we generate new text by repeatedly: * 1) Drawing from the model a probability distribution over the next character given the text available so far * 2) Reweighting the distribution to a certain "temperature" * 3) Sampling the next character at random according to the reweighted distribution * 4) Adding the new character at the end of the available text This is the code we use to reweight the original probability distribution coming out of the model, and draw a character index from it (the "sampling function"): ``` def sample(preds, temperature=1.0): preds = np.asarray(preds).astype('float64') preds = np.log(preds) / temperature exp_preds = np.exp(preds) preds = exp_preds / np.sum(exp_preds) probas = np.random.multinomial(1, preds, 1) return np.argmax(probas) ``` Finally, this is the loop where we repeatedly train and generated text. We start generating text using a range of different temperatures after every epoch. This allows us to see how the generated text evolves as the model starts converging, as well as the impact of temperature in the sampling strategy. ``` import random import sys for epoch in range(1, 60): print('epoch', epoch) # Fit the model for 1 epoch on the available training data model.fit(x, y, batch_size=128, epochs=1) # Select a text seed at random start_index = random.randint(0, len(text) - maxlen - 1) generated_text = text[start_index: start_index + maxlen] print('--- Generating with seed: "' + generated_text + '"') for temperature in [0.2, 0.5, 1.0, 1.2]: print('------ temperature:', temperature) sys.stdout.write(generated_text) # We generate 400 characters for i in range(400): sampled = np.zeros((1, maxlen, len(chars))) for t, char in enumerate(generated_text): sampled[0, t, char_indices[char]] = 1. preds = model.predict(sampled, verbose=0)[0] next_index = sample(preds, temperature) next_char = chars[next_index] generated_text += next_char generated_text = generated_text[1:] sys.stdout.write(next_char) sys.stdout.flush() print() ``` As you can see, a low temperature results in extremely repetitive and predictable text, but where local structure is highly realistic: in particular, all words (a word being a local pattern of characters) are real English words. With higher temperatures, the generated text becomes more interesting, surprising, even creative; it may sometimes invent completely new words that sound somewhat plausible (such as "eterned" or "troveration"). With a high temperature, the local structure starts breaking down and most words look like semi-random strings of characters. Without a doubt, here 0.5 is the most interesting temperature for text generation in this specific setup. Always experiment with multiple sampling strategies! A clever balance between learned structure and randomness is what makes generation interesting. 
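To make the effect of temperature concrete, here is a small standalone sketch (not from the original notebook; the helper name `reweight_distribution` is just for this illustration) that applies the same reweighting used inside `sample` to a toy next-character distribution. Low temperatures sharpen the distribution around the most likely character, while higher temperatures flatten it:

```
import numpy as np

def reweight_distribution(original_distribution, temperature=0.5):
    # Same math as in `sample`: take the log, divide by temperature, re-normalize
    distribution = np.log(original_distribution) / temperature
    distribution = np.exp(distribution)
    return distribution / np.sum(distribution)

# A toy distribution over 4 possible next characters
original = np.array([0.5, 0.3, 0.15, 0.05])

for temperature in [0.2, 0.5, 1.0, 1.2]:
    print(temperature, np.round(reweight_distribution(original, temperature), 3))
```

At `temperature=1.0` the distribution is unchanged; at `0.2` almost all of the probability mass moves onto the most likely character, which is why the generated text becomes so repetitive at low temperatures.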
Note that by training a bigger model, for longer, on more data, you can achieve generated samples that will look much more coherent and realistic than ours. But of course, don't expect to ever generate any meaningful text, other than by random chance: all we are doing is sampling data from a statistical model of which characters come after which characters. Language is a communication channel, and there is a distinction between what communications are about, and the statistical structure of the messages in which communications are encoded. To evidence this distinction, here is a thought experiment: what if human language did a better job at compressing communications, much like our computers do with most of our digital communications? Then language would be no less meaningful, yet it would lack any intrinsic statistical structure, thus making it impossible to learn a language model like we just did.

## Takeaways

* We can generate discrete sequence data by training a model to predict the next token(s) given previous tokens.
* In the case of text, such a model is called a "language model" and could be based on either words or characters.
* Sampling the next token requires a balance between adhering to what the model judges likely, and introducing randomness.
* One way to handle this is the notion of _softmax temperature_. Always experiment with different temperatures to find the "right" one; a short demonstration follows below.
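To make the effect of temperature concrete, here is a minimal toy sketch (not from the book): it reweights a made-up next-character distribution with the same log/exp trick used in the `sample` function above, at the temperatures we used during training.

```
import numpy as np

# A made-up distribution over 4 hypothetical next characters
probs = np.array([0.5, 0.3, 0.15, 0.05])

def reweight(preds, temperature):
    # same reweighting as in `sample`: divide log-probabilities by the temperature
    preds = np.log(preds) / temperature
    preds = np.exp(preds)
    return preds / preds.sum()

for t in [0.2, 0.5, 1.0, 1.2]:
    print(t, np.round(reweight(probs, t), 3))
```

At 0.2 the distribution collapses onto the most likely character, at 1.0 it is unchanged, and above 1.0 it flattens out -- which matches the repetitive-versus-surprising behaviour described above.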
# Getting to know LSTMs better

Created: September 13, 2018

Author: Thamme Gowda

Goals:
- To get batches of *unequal length sequences* encoded correctly!
- Know how the hidden states flow between encoders and decoders
- Know how the multiple stacked LSTM layers pass hidden states

Example: a simple bi-directional LSTM which takes 3d input vectors and produces 2d output vectors.

```
import torch
from torch import nn

lstm = nn.LSTM(3, 2, batch_first=True, bidirectional=True)

# Let's create a batch input.
# 3 sequences in batch (the first dim), see batch_first=True
# The longest sequence is 4 time steps ==> second dimension
# Each time step has a 3d vector as input ==> last dimension
pad_seq = torch.rand(3, 4, 3)

# That is nice for the theory,
# but in practice we are dealing with unequal length sequences.
# Among those 3 sequences in the batch, let us say
# the first sequence is the longest, with 4 time steps --> no padding needed
# the second seq is 3 time steps --> pad the last time step
pad_seq[1, 3, :] = 0.0
# the third seq is 2 time steps --> pad the last two steps
pad_seq[2, 2:, :] = 0.0

print("Padded Input:")
print(pad_seq)

# so we got these lengths
lens = [4,3,2]
print("Sequence Lengths: ", lens)

# let's send the padded seq to the LSTM
out, (h_t, c_t) = lstm(pad_seq)
print("All Outputs:")
print(out)
```
^^ Output is a 2x2d=4d vector since it is bidirectional; the forward 2d and backward 2d outputs are concatenated.
Total vectors = 12: 3 seqs in batch x 4 time steps; each vector is 4d.

> Hmm, what happened to my padding time steps? Will padded zeros mess with the internal weights of the LSTM when I do backprop?

---
Let's look at the last hidden state

```
print(h_t)
```
The last hidden state is a 2d vector (same as the output), but there are 2 per sequence because of the bidirectional RNN.
There are 3 of them since there were three seqs in the batch, each corresponding to the last time step.
But the definition of *last time step* is a bit tricky:
for the left-to-right LSTM, it is the last step of the input;
for the right-to-left LSTM, it is the first step of the input.
This makes sense now.

---
Let's look at $c_t$:

```
print("Last c_t:")
print(c_t)
```
This should be similar to the last hidden state.

## Question:
> What happened to my padding time steps? Did the last hidden state exclude the padded time steps?

I can see that the last hidden state of the forward LSTM didn't distinguish the padded zeros.

Let's see the output of each time step and the last hidden state of the left-to-right LSTM, again.
We know that the lengths (after removing padding) are \[4,3,2]

```
print("All time stamp outputs:")
print(out[:, :, :2])
print("Last hidden state (forward LSTM):")
print(h_t[0])
```
*Okay, now I get it.*
When building sequence to sequence models (for machine translation) I can't pass the last hidden state like this to a decoder.
We have to inform the LSTM about the lengths.

How? That's why we have `torch.nn.utils.rnn.pack_padded_sequence`

```
print("Padded Seqs:")
print(pad_seq)
print("Lens:", lens)
print("Pack Padded Seqs:")
pac_pad_seq = torch.nn.utils.rnn.pack_padded_sequence(pad_seq, lens, batch_first=True)
print(pac_pad_seq)
```
Okay, this is doing some magic -- getting rid of all padded zeros -- Cool!

`batch_sizes=tensor([3, 3, 2, 1])` seems to be the main ingredient of this magic.

`[3, 3, 2, 1]` I get it! We have 4 time steps in the batch.
- The first two steps have all 3 seqs in the batch.
- The third step is made of the first 2 seqs in the batch.
- The fourth step is made of the first seq in the batch.

I now understand why the sequences in the batch have to be sorted by descending order of lengths! (A short sketch of doing that sort explicitly follows below.)
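As a side sketch (not part of the original walk-through): if a batch arrives in arbitrary order, you can sort it by length before packing and keep the permutation so results can be restored to the original order. The tensors and lengths here are made up. Newer PyTorch versions can also skip the manual sort by passing `enforce_sorted=False` to `pack_padded_sequence`.

```
# Hypothetical unsorted batch: 3 padded sequences of lengths 2, 4 and 3
unsorted_lens = torch.tensor([2, 4, 3])
unsorted_seqs = torch.rand(3, 4, 3)   # padding positions beyond each length are simply ignored by packing

# Sort by descending length and remember the permutation
sorted_lens, perm = unsorted_lens.sort(descending=True)
sorted_seqs = unsorted_seqs[perm]

packed = torch.nn.utils.rnn.pack_padded_sequence(sorted_seqs, sorted_lens, batch_first=True)
print(packed.batch_sizes)   # expect tensor([3, 3, 2, 1])

# The inverse permutation restores any per-sequence result to the original order
inv_perm = perm.argsort()
print(sorted_seqs[inv_perm].shape)
```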
Now let us send it to LSTM and see what it produces ``` pac_pad_out, (pac_ht, pac_ct) = lstm(pac_pad_seq) # Lets first look at output. this is packed output print(pac_pad_out) ``` Okay this is packed output. Sequences are of unequal lengths. Now we need to restore the output by padding 0s for shorter sequences. ``` pad_out = nn.utils.rnn.pad_packed_sequence(pac_pad_out, batch_first=True, padding_value=0) print(pad_out) ``` Output looks good! Now Let us look at the hidden state. ``` print(pac_ht) ``` This is great. As we see the forward (or Left-to-right) LSTM's last hidden state is proper as per the lengths. So should be the c_t. Let us concatenate forward and reverse LSTM's hidden states ``` torch.cat([pac_ht[0],pac_ht[1]], dim=1) ``` ---- # Multi Layer LSTM Let us redo the above hacking to understand how 2 layer LSTM works ``` n_layers = 2 inp_size = 3 out_size = 2 lstm2 = nn.LSTM(inp_size, out_size, num_layers=n_layers, batch_first=True, bidirectional=True) pac_out, (h_n, c_n) = lstm2(pac_pad_seq) print("Packed Output:") print(pac_out) pad_out = nn.utils.rnn.pad_packed_sequence(pac_out, batch_first=True, padding_value=0) print("Pad Output:") print(pad_out) print("Last h_n:") print(h_n) print("Last c_n:") print(c_n) ``` The LSTM output looks similar to single layer LSTM. However the ht and ct states are bigger -- since there are two layers. Now its time to RTFM. > h_n of shape `(num_layers * num_directions, batch, hidden_size)`: tensor containing the hidden state for `t = seq_len`. Like output, the layers can be separated using `h_n.view(num_layers, num_directions, batch, hidden_size)` and similarly for c_n. ``` batch_size = 3 num_dirs = 2 l_n_h_n = h_n.view(n_layers, num_dirs, batch_size, out_size)[-1] # last layer last time step hidden state print(l_n_h_n) last_hid = torch.cat([l_n_h_n[0], l_n_h_n[1]], dim=1) print("last layer last time stamp hidden state") print(last_hid) print("Padded Outputs :") print(pad_out) ```
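To connect this back to the stated goal of passing hidden states from an encoder to a decoder, here is a minimal sketch under the assumption that the decoder is a unidirectional LSTM whose hidden size equals the concatenated forward+backward size; the decoder and its dummy input are illustrative only, reusing `h_n`, `c_n` and the sizes defined above.

```
# Hypothetical decoder: unidirectional, same number of layers,
# hidden size = forward + backward encoder states concatenated
dec_hidden_size = num_dirs * out_size
decoder = nn.LSTM(dec_hidden_size, dec_hidden_size, num_layers=n_layers, batch_first=True)

# (num_layers * num_dirs, batch, hidden) -> (num_layers, num_dirs, batch, hidden)
h = h_n.view(n_layers, num_dirs, batch_size, out_size)
c = c_n.view(n_layers, num_dirs, batch_size, out_size)

# concatenate the two directions per layer -> (num_layers, batch, 2 * hidden)
dec_h0 = torch.cat([h[:, 0], h[:, 1]], dim=-1)
dec_c0 = torch.cat([c[:, 0], c[:, 1]], dim=-1)

# a dummy one-step decoder input (in a real model this would be an embedded token)
dec_input = torch.zeros(batch_size, 1, dec_hidden_size)
dec_out, (dec_h, dec_c) = decoder(dec_input, (dec_h0, dec_c0))
print(dec_out.shape)   # torch.Size([3, 1, 4])
```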
## Differential Privacy - Simple Database Queries The database is going to be a VERY simple database with only one boolean column. Each row corresponds to a person. Each value corresponds to whether or not that person has a certain private attribute (such as whether they have a certain disease, or whether they are above/below a certain age). We are then going to learn how to know whether a database query over such a small database is differentially private or not - and more importantly - what techniques we can employ to ensure various levels of privacy #### Create a Simple Database To do this, initialize a random list of 1s and 0s (which are the entries in our database). Note - the number of entries directly corresponds to the number of people in our database. ``` import torch # the number of entries in our DB / this of it as number of people in the DB num_entries = 5000 db = torch.rand(num_entries) > 0.5 db ``` ## Generate Parallel Databases > "When querying a database, if I removed someone from the database, would the output of the query change?". In order to check for this, we create "parallel databases" which are simply databases with one entry removed. We'll create a list of every parallel database to the one currently contained in the "db" variable. Then, create a helper function which does the following: - creates the initial database (db) - creates all parallel databases ``` def create_parallel_db(db, remove_index): return torch.cat((db[0:remove_index], db[remove_index+1:])) def create_parallel_dbs(db): parallel_dbs = list() for i in range(len(db)): pdb = create_parallel_db(db, i) parallel_dbs.append(pdb) return parallel_dbs def create_db_and_parallels(num_entries): # generate dbs and parallel dbs on the fly db = torch.rand(num_entries) > 0.5 pdbs = create_parallel_dbs(db) return db, pdbs db, pdbs = create_db_and_parallels(10) pdbs print("Real database:", db) print("Size of real DB", db.size()) print("A sample parallel DB", pdbs[0]) print("Size of parallel DB", pdbs[0].size()) ``` # Towards Evaluating The Differential Privacy of a Function Intuitively, we want to be able to query our database and evaluate whether or not the result of the query is leaking "private" information. > This is about evaluating whether the output of a query changes when we remove someone from the database. Specifically, we want to evaluate the *maximum* amount the query changes when someone is removed (maximum over all possible people who could be removed). To find how much privacy is leaked, we'll iterate over each person in the database and **measure** the difference in the output of the query relative to when we query the entire database. Just for the sake of argument, let's make our first "database query" a simple sum. Aka, we're going to count the number of 1s in the database. ``` db, pdbs = create_db_and_parallels(200) def query(db): return db.sum() query(db) # the output of the parallel dbs is different from the db query query(pdbs[1]) full_db_result = query(db) print(full_db_result) sensitivity = 0 sensitivity_scale = [] for pdb in pdbs: pdb_result = query(pdb) db_distance = torch.abs(pdb_result - full_db_result) if(db_distance > sensitivity): sensitivity_scale.append(db_distance) sensitivity = db_distance sensitivity ``` #### Sensitivity > The maximum amount the query changes when removing an individual from the DB. 
# Evaluating the Privacy of a Function The difference between each parallel db's query result and the query result for the real database and its max value (which was 1) is called "sensitivity". It corresponds to the function we chose for the query. The "sum" query will always have a sensitivity of exactly 1. We can also calculate sensitivity for other functions as well. Let's calculate sensitivity for the "mean" function. ``` def sensitivity(query, num_entries=1000): db, pdbs = create_db_and_parallels(num_entries) full_db_result = query(db) max_distance = 0 for pdb in pdbs: # for each parallel db, execute the query (sum, or mean, ..., etc) pdb_result = query(pdb) db_distance = torch.abs(pdb_result - full_db_result) if (db_distance > max_distance): max_distance = db_distance return max_distance # our query is now the mean def query(db): return db.float().mean() sensitivity(query) ``` Wow! That sensitivity is WAY lower. Note the intuition here. >"Sensitivity" is measuring how sensitive the output of the query is to a person being removed from the database. For a simple sum, this is always 1, but for the mean, removing a person is going to change the result of the query by rougly 1 divided by the size of the database. Thus, "mean" is a VASTLY less "sensitive" function (query) than SUM. # Calculating L1 Sensitivity For Threshold TO calculate the sensitivty for the "threshold" function: - First compute the sum over the database (i.e. sum(db)) and return whether that sum is greater than a certain threshold. - Then, create databases of size 10 and threshold of 5 and calculate the sensitivity of the function. - Finally, re-initialize the database 10 times and calculate the sensitivity each time. ``` def query(db, threshold=5): """ Query that adds a threshold of 5, and returns whether sum is > threshold or not. """ return (db.sum() > threshold).float() for i in range(10): sens = sensitivity(query, num_entries=10) print(sens) ``` # A Basic Differencing Attack Sadly none of the functions we've looked at so far are differentially private (despite them having varying levels of sensitivity). The most basic type of attack can be done as follows. Let's say we wanted to figure out a specific person's value in the database. All we would have to do is query for the sum of the entire database and then the sum of the entire database without that person! ## Performing a Differencing Attack on Row 10 (How privacy can fail) We'll construct a database and then demonstrate how one can use two different sum queries to explose the value of the person represented by row 10 in the database (note, you'll need to use a database with at least 10 rows) ``` db, _ = create_db_and_parallels(100) db # create a parallel db with that person (index 10) removed pdb = create_parallel_db(db, remove_index=10) pdb # differencing attack using sum query sum(db) - sum(pdb) # a differencing attack using mean query sum(db).float() /len(db) - sum(pdb).float() / len(pdb) # differencing using a threshold (sum(db).float() > 50) - (sum(pdb).float() > 50) ``` # Local Differential Privacy Differential privacy always requires a form of randommess or noise added to the query to protect from things like Differencing Attacks. To explain this, let's look at Randomized Response. ### Randomized Response (Local Differential Privacy) Let's say I have a group of people I wish to survey about a very taboo behavior which I think they will lie about (say, I want to know if they have ever committed a certain kind of crime). 
I'm not a policeman, I'm just trying to collect statistics to understand the higher level trend in society.

So, how do we do this? One technique is to add randomness to each person's response by giving each person the following instructions (assuming I'm asking a simple yes/no question):
- Flip a coin 2 times.
- If the first coin flip is heads, answer honestly
- If the first coin flip is tails, answer according to the second coin flip (heads for yes, tails for no)!

Thus, each person is now protected with "plausible deniability". If they answer "Yes" to the question "have you committed X crime?", then it might be because they actually did, or it might be because they are answering according to a random coin flip. Each person has a high degree of protection. Furthermore, we can recover the underlying statistics with some accuracy, as the "true statistics" are simply averaged with a 50% probability. Thus, if we collect a bunch of samples and it turns out that 60% of people answer yes, then we know that the TRUE distribution is actually centered around 70%, because 70% averaged with 50% (a coin flip) is 60%, which is the result we obtained. However, it should be noted that, especially when we only have a few samples, this comes at the cost of accuracy. This tradeoff exists across all of Differential Privacy.

> NOTE: **The greater the privacy protection (plausible deniability), the less accurate the results.**

Let's implement this local DP for our database from before! The main goals are to:
* Get the most accurate query with the **greatest** amount of privacy
* Best fit with trust models in the actual world (don't waste trust)

Let's implement local differential privacy:

```
db, pdbs = create_db_and_parallels(100)
db

def query(db):
    true_result = torch.mean(db.float())

    # local differential privacy is adding noise to data: replacing some
    # of the values with random values
    first_coin_flip = (torch.rand(len(db)) > 0.5).float()
    second_coin_flip = (torch.rand(len(db)) > 0.5).float()

    # differentially private DB ...
    augmented_db = db.float() * first_coin_flip + (1 - first_coin_flip) * second_coin_flip

    # the result is skewed if we do:
    # torch.mean(augmented_db.float())
    # we remove the skew that was introduced by the differential privacy
    dp_result = torch.mean(augmented_db.float()) * 2 - 0.5

    return dp_result, true_result

db, pdbs = create_db_and_parallels(10)
private_result, true_result = query(db)
print(f"With noise: {private_result}")
print(f"Without noise: {true_result}")

# Increasing the size of the dataset
db, pdbs = create_db_and_parallels(100)
private_result, true_result = query(db)
print(f"With noise: {private_result}")
print(f"Without noise: {true_result}")

# Increasing the size of the dataset even further
db, pdbs = create_db_and_parallels(1000)
private_result, true_result = query(db)
print(f"With noise: {private_result}")
print(f"Without noise: {true_result}")
```
As we have seen,
> The more data we have, the less the noise will tend to affect the output of the query.

# Varying Amounts of Noise

We are going to augment the randomized response query to allow for varying amounts of randomness to be added. To do this, we bias the coin flip to be higher or lower and then run the same experiment.

We'll need to both adjust the likelihood of the first coin flip AND the de-skewing at the end (where we compute the de-skewed result).

```
# `noise` sets the likelihood that the first coin flip is tails, i.e. that a person answers randomly instead of honestly.
noise = 0.2

true_result = torch.mean(db.float())

# let's add the noise to data: replacing some of the values with random values
first_coin_flip = (torch.rand(len(db)) > noise).float()
second_coin_flip = (torch.rand(len(db)) > 0.5).float()

# differentially private DB ...
augmented_db = db.float() * first_coin_flip + (1 - first_coin_flip) * second_coin_flip

# the raw mean of the augmented DB is skewed, so we de-skew it using the
# general formula for a first coin flip that is honest with probability (1 - noise)
sk_result = augmented_db.float().mean()
dp_result = ((sk_result / noise) - 0.5) * noise / (1 - noise)

print('True result:', true_result)
print('Skewed result:', sk_result)
print('De-skewed result:', dp_result)

def query(db, noise=0.2):
    """`noise` is the probability that a person answers randomly instead of honestly (default 0.2)."""
    true_result = torch.mean(db.float())

    # local diff privacy is adding noise to data: replacing some
    # of the values with random values
    first_coin_flip = (torch.rand(len(db)) > noise).float()
    second_coin_flip = (torch.rand(len(db)) > 0.5).float()

    # differentially private DB ...
    augmented_db = db.float() * first_coin_flip + (1 - first_coin_flip) * second_coin_flip

    # the result is skewed if we do:
    # torch.mean(augmented_db.float())
    # we remove the skewed average that was the result of the differential privacy
    sk_result = augmented_db.float().mean()
    private_result = ((sk_result / noise) - 0.5) * noise / (1 - noise)

    return private_result, true_result

# test varying noise
db, pdbs = create_db_and_parallels(10)
private_result, true_result = query(db, noise=0.2)
print(f"With noise: {private_result}")
print(f"Without noise: {true_result}")

# Increasing the size of the dataset even further
db, pdbs = create_db_and_parallels(100)
private_result, true_result = query(db, noise=0.4)
print(f"With noise: {private_result}")
print(f"Without noise: {true_result}")

# Increasing the size of the dataset even further
db, pdbs = create_db_and_parallels(10000)
private_result, true_result = query(db, noise=0.8)
print(f"With noise: {private_result}")
print(f"Without noise: {true_result}")
```
From the analysis above, with more data it's easier to protect privacy with noise. It becomes a lot easier to learn about general characteristics in the DB because the algorithm has more data points to look at and compare with each other.

So differential privacy mechanisms have helped us filter out any information unique to individual data entities and try to let through information that is consistent across multiple different people in the dataset.

> The larger the dataset, the easier it is to protect privacy.

# The Formal Definition of Differential Privacy

The previous method of adding noise was called "Local Differential Privacy" because we added noise to each datapoint individually. This is necessary for some situations wherein the data is SO sensitive that individuals do not trust noise to be added later. However, it comes at a very high cost in terms of accuracy.

However, alternatively we can add noise AFTER data has been aggregated by a function. This kind of noise can allow for similar levels of protection with a lower effect on accuracy. However, participants must be able to trust that no-one looked at their datapoints _before_ the aggregation took place. In some situations this works out well, in others (such as an individual hand-surveying a group of people), this is less realistic.
Nevertheless, global differential privacy is incredibly important because it allows us to perform differential privacy on smaller groups of individuals with lower amounts of noise. Let's revisit our sum functions. ``` db, pdbs = create_db_and_parallels(100) def query(db): return torch.sum(db.float()) def M(db): query(db) + noise query(db) ``` So the idea here is that we want to add noise to the output of our function. We actually have two different kinds of noise we can add - Laplacian Noise or Gaussian Noise. However, before we do so at this point we need to dive into the formal definition of Differential Privacy. ![alt text](dp_formula.png "Title") _Image From: "The Algorithmic Foundations of Differential Privacy" - Cynthia Dwork and Aaron Roth - https://www.cis.upenn.edu/~aaroth/Papers/privacybook.pdf_ This definition does not _create_ differential privacy, instead it is a measure of how much privacy is afforded by a query M. Specifically, it's a comparison between running the query M on a database (x) and a parallel database (y). As you remember, parallel databases are defined to be the same as a full database (x) with one entry/person removed. Thus, this definition says that FOR ALL parallel databases, the maximum distance between a query on database (x) and the same query on database (y) will be e^epsilon, but that occasionally this constraint won't hold with probability delta. Thus, this theorem is called "epsilon delta" differential privacy. # Epsilon Let's unpack the intuition of this for a moment. Epsilon Zero: If a query satisfied this inequality where epsilon was set to 0, then that would mean that the query for all parallel databases outputed the exact same value as the full database. As you may remember, when we calculated the "threshold" function, often the Sensitivity was 0. In that case, the epsilon also happened to be zero. Epsilon One: If a query satisfied this inequality with epsilon 1, then the maximum distance between all queries would be 1 - or more precisely - the maximum distance between the two random distributions M(x) and M(y) is 1 (because all these queries have some amount of randomness in them, just like we observed in the last section). # Delta Delta is basically the probability that epsilon breaks. Namely, sometimes the epsilon is different for some queries than it is for others. For example, you may remember when we were calculating the sensitivity of threshold, most of the time sensitivity was 0 but sometimes it was 1. Thus, we could calculate this as "epsilon zero but non-zero delta" which would say that epsilon is perfect except for some probability of the time when it's arbitrarily higher. Note that this expression doesn't represent the full tradeoff between epsilon and delta. # How To Add Noise for Global Differential Privacy Global Differential Privacy adds noise to the output of a query. We'll add noise to the output of our query so that it satisfies a certain epsilon-delta differential privacy threshold. There are two kinds of noise we can add - Gaussian Noise - Laplacian Noise. Generally speaking Laplacian is better, but both are still valid. Now to the hard question... ### How much noise should we add? 
The amount of noise necessary to add to the output of a query is a function of four things: - the type of noise (Gaussian/Laplacian) - the sensitivity of the query/function - the desired epsilon (ε) - the desired delta (δ) Thus, for each type of noise we're adding, we have different way of calculating how much to add as a function of sensitivity, epsilon, and delta. Laplacian noise is increased/decreased according to a "scale" parameter b. We choose "b" based on the following formula. `b = sensitivity(query) / epsilon` In other words, if we set b to be this value, then we know that we will have a privacy leakage of <= epsilon. Furthermore, the nice thing about Laplace is that it guarantees this with delta == 0. There are some tunings where we can have very low epsilon where delta is non-zero, but we'll ignore them for now. ### Querying Repeatedly - if we query the database multiple times - we can simply add the epsilons (Even if we change the amount of noise and their epsilons are not the same). # Create a Differentially Private Query Let's create a query function which sums over the database and adds just the right amount of noise such that it satisfies an epsilon constraint. query will be for "sum" and for "mean". We'll use the correct sensitivity measures for both. ``` epsilon = 0.001 import numpy as np db, pdbs = create_db_and_parallels(100) db def sum_query(db): return db.sum() def laplacian_mechanism(db, query, sensitivity): beta = sensitivity / epsilon noise = torch.tensor(np.random.laplace(0, beta, 1)) return query(db) + noise laplacian_mechanism(db, sum_query, 0.01) def mean_query(db): return torch.mean(db.float()) laplacian_mechanism(db, mean_query, 1) ``` # Differential Privacy for Deep Learning So what does all of this have to do with Deep Learning? Well, these mechanisms form the core primitives for how Differential Privacy provides guarantees in the context of Deep Learning. ### Perfect Privacy > "a query to a database returns the same value even if we remove any person from the database". In the context of Deep Learning, we have a similar standard. > Training a model on a dataset should return the same model even if we remove any person from the dataset. Thus, we've replaced "querying a database" with "training a model on a dataset". In essence, the training process is a kind of query. However, one should note that this adds two points of complexity which database queries did not have: 1. do we always know where "people" are referenced in the dataset? 2. neural models rarely never train to the same output model, even on identical data The answer to (1) is to treat each training example as a single, separate person. Strictly speaking, this is often overly zealous as some training examples have no relevance to people and others may have multiple/partial (consider an image with multiple people contained within it). Thus, localizing exactly where "people" are referenced, and thus how much your model would change if people were removed, is challenging. The answer to (2) is also an open problem. To solve this, lets look at PATE. ## Scenario: A Health Neural Network You work for a hospital and you have a large collection of images about your patients. However, you don't know what's in them. You would like to use these images to develop a neural network which can automatically classify them, however since your images aren't labeled, they aren't sufficient to train a classifier. 
However, being a cunning strategist, you realize that you can reach out to 10 partner hospitals which have annotated data. It is your hope to train your new classifier on their datasets so that you can automatically label your own. While these hospitals are interested in helping, they have privacy concerns regarding information about their patients. Thus, you will use the following technique to train a classifier which protects the privacy of patients in the other hospitals. - 1) You'll ask each of the 10 hospitals to train a model on their own datasets (All of which have the same kinds of labels) - 2) You'll then use each of the 10 partner models to predict on your local dataset, generating 10 labels for each of your datapoints - 3) Then, for each local data point (now with 10 labels), you will perform a DP query to generate the final true label. This query is a "max" function, where "max" is the most frequent label across the 10 labels. We will need to add laplacian noise to make this Differentially Private to a certain epsilon/delta constraint. - 4) Finally, we will retrain a new model on our local dataset which now has labels. This will be our final "DP" model. So, let's walk through these steps. I will assume you're already familiar with how to train/predict a deep neural network, so we'll skip steps 1 and 2 and work with example data. We'll focus instead on step 3, namely how to perform the DP query for each example using toy data. So, let's say we have 10,000 training examples, and we've got 10 labels for each example (from our 10 "teacher models" which were trained directly on private data). Each label is chosen from a set of 10 possible labels (categories) for each image. ``` import numpy as np num_teachers = 10 # we're working with 10 partner hospitals num_examples = 10000 # the size of OUR dataset num_labels = 10 # number of lablels for our classifier # fake predictions fake_preds = ( np.random.rand( num_teachers, num_examples ) * num_labels).astype(int).transpose(1,0) fake_preds[:,0] # Step 3: Perform a DP query to generate the final true label/outputs, # Use the argmax function to find the most frequent label across all 10 labels, # Then finally add some noise to make it differentially private. new_labels = list() for an_image in fake_preds: # count the most frequent label the hospitals came up with label_counts = np.bincount(an_image, minlength=num_labels) epsilon = 0.1 beta = 1 / epsilon for i in range(len(label_counts)): # for each label, add some noise to the counts label_counts[i] += np.random.laplace(0, beta, 1) new_label = np.argmax(label_counts) new_labels.append(new_label) # new_labels new_labels[:10] ``` # PATE Analysis ``` # lets say the hospitals came up with these outputs... 9, 9, 3, 6 ..., 2 labels = np.array([9, 9, 3, 6, 9, 9, 9, 9, 8, 2]) counts = np.bincount(labels, minlength=10) print(counts) query_result = np.argmax(counts) query_result ``` If every hospital says the result is 9, then we have very low sensitivity. We could remove a person, from the dataset, and the query results still is 9, then we have not leaked any information. Core assumption: The same patient was not present at any of this two hospitals. Removing any one of this hospitals, acts as a proxy to removing one person, which means that if we do remove one hospital, the query result should not be different. 
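To make that claim concrete, here is a small illustrative check (the vote vectors are hypothetical): the plain argmax over unanimous teacher votes never changes when any single hospital is removed, while a closely split vote can flip.

```
def stable_under_removal(votes, num_labels=10):
    # "parallel databases": drop each hospital's vote in turn and
    # check whether the winning label stays the same
    full = np.argmax(np.bincount(votes, minlength=num_labels))
    return all(
        np.argmax(np.bincount(np.delete(votes, i), minlength=num_labels)) == full
        for i in range(len(votes))
    )

unanimous = np.array([9] * 10)                        # every hospital says 9
split = np.array([9, 9, 3, 3, 3, 9, 9, 3, 9, 3])      # a 5-5 split between 9 and 3
print(stable_under_removal(unanimous))   # True  -> low sensitivity
print(stable_under_removal(split))       # False -> the answer can flip
```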
``` from syft.frameworks.torch.differential_privacy import pate num_teachers, num_examples, num_labels = (100, 100, 10) # generate fake predictions/labels preds = (np.random.rand(num_teachers, num_examples) * num_labels).astype(int) indices = (np.random.rand(num_examples) * num_labels).astype(int) # true answers preds[:,0:10] *= 0 # perform PATE to find the data depended epsilon and data independent epsilon data_dep_eps, data_ind_eps = pate.perform_analysis( teacher_preds=preds, indices=indices, noise_eps=0.1, delta=1e-5 ) print('Data Independent Epsilon', data_ind_eps) print('Data Dependent Epsilon', data_dep_eps) assert data_dep_eps < data_ind_eps data_dep_eps, data_ind_eps = pate.perform_analysis(teacher_preds=preds, indices=indices, noise_eps=0.1, delta=1e-5) print("Data Independent Epsilon:", data_ind_eps) print("Data Dependent Epsilon:", data_dep_eps) preds[:,0:50] *= 0 data_dep_eps, data_ind_eps = pate.perform_analysis(teacher_preds=preds, indices=indices, noise_eps=0.1, delta=1e-5, moments=20) print("Data Independent Epsilon:", data_ind_eps) print("Data Dependent Epsilon:", data_dep_eps) ``` # Where to Go From Here Read: - Algorithmic Foundations of Differential Privacy: https://www.cis.upenn.edu/~aaroth/Papers/privacybook.pdf - Deep Learning with Differential Privacy: https://arxiv.org/pdf/1607.00133.pdf - The Ethical Algorithm: https://www.amazon.com/Ethical-Algorithm-Science-Socially-Design/dp/0190948205 Topics: - The Exponential Mechanism - The Moment's Accountant - Differentially Private Stochastic Gradient Descent Advice: - For deployments - stick with public frameworks! - Join the Differential Privacy Community - Don't get ahead of yourself - DP is still in the early days # Application of DP in Private Federated Learning DP works by adding statistical noise either at the input level or output level of the model so that you can mask out individual user contribution, but at the same time gain insight into th overall population without sacrificing privacy. > Case: Figure out average money one has in their pockets. We could go and ask someone how much they have in their wallet. They pick a random number between -100 and 100. Add that to the real value, say $20 and a picked number of 100. resulting in 120. That way, we have no way to know what the actual amount of money in their wallet is. When sufficiently large numbers of people submit these results, if we take the average, the noise will cancel out and we'll start seeing the true average. Apart from statistical use cases, we can apply DP in Private Federated learning. Suppose you want to train a model using distributed learning across a number of user devices. One way to do that is to get all the private data from the devices, but that's not very privacy friendly. Instead, we send the model from the server back to the devices. The devices will then train the model using their user data, and only send the privatized model updates back to the server. Server will then aggregate the updates and make an informed decision of the overall model on the server. As you do more and more rounds, slowly the model converges to the true population without private user data having to leave the devices. If you increase the level of privacy, the model converges a bit slower and vice versa. # Project: For the final project for this section, you're going to train a DP model using this PATE method on the MNIST dataset, provided below. 
``` import torchvision.datasets as datasets mnist_trainset = datasets.MNIST(root='./data', train=True, download=True, transform=None) train_data = mnist_trainset.train_data train_targets = mnist_trainset.train_labels test_data = mnist_trainset.test_data test_targets = mnist_trainset.test_labels ```
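As a possible starting point for the project, here is a sketch (not a full solution) of the DP labeling step applied to these local images. The teacher predictions below are random stand-ins; in the real project they would come from the 10 models trained on the partner hospitals' data, and the noisy-max query is the same one used above.

```
import numpy as np

# Hypothetical teacher predictions for our local training images
num_teachers = 10
num_local_examples = train_data.shape[0]
teacher_preds = (np.random.rand(num_teachers, num_local_examples) * 10).astype(int)

epsilon = 0.1
beta = 1 / epsilon

dp_labels = []
for votes in teacher_preds.T:                              # one column per local image
    counts = np.bincount(votes, minlength=10).astype(float)
    counts += np.random.laplace(0, beta, len(counts))      # noisy max, as above
    dp_labels.append(int(np.argmax(counts)))

dp_labels = np.array(dp_labels)
# dp_labels can now serve as targets for training the "student" model on train_data
```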
# Lesson 9 Practice: Supervised Machine Learning Use this notebook to follow along with the lesson in the corresponding lesson notebook: [L09-Supervised_Machine_Learning-Lesson.ipynb](./L09-Supervised_Machine_Learning-Lesson.ipynb). ## Instructions Follow along with the teaching material in the lesson. Throughout the tutorial sections labeled as "Tasks" are interspersed and indicated with the icon: ![Task](http://icons.iconarchive.com/icons/sbstnblnd/plateau/16/Apps-gnome-info-icon.png). You should follow the instructions provided in these sections by performing them in the practice notebook. When the tutorial is completed you can turn in the final practice notebook. For each task, use the cell below it to write and test your code. You may add additional cells for any task as needed or desired. ## Task 1a: Setup Import the following package sets: + packages for data management + pacakges for visualization + packages for machine learning Remember to activate the `%matplotlib inline` magic. ``` %matplotlib inline # Data Management import numpy as np import pandas as pd # Visualization import seaborn as sns import matplotlib.pyplot as plt # Machine learning from sklearn import model_selection from sklearn import preprocessing from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import Perceptron from sklearn.linear_model import SGDClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.discriminant_analysis import LinearDiscriminantAnalysis ``` ## Task 2a: Data Exploration After reviewing the data in sections 2.1, 2.2, 2.3 and 2.4 do you see any problems with this iris dataset? If so, please describe them in the practice notebook. If not, simply indicate that there are no issues. ## Task 2b: Make Assumptions After reviewing the data in sections 2.1, 2.2, 2.3 and 2.4 are there any columns that would make poor predictors of species? **Hint**: columns that are poor predictors are: + those with too many missing values + those with no difference in variation when grouped by the outcome class + variables with high levels of collinearity ## Task 3a: Practice with the random forest classifier Now that you have learned how to perform supervised machine learning using a variety of algorithms, lets practice using a new algorithm we haven't looked at yet: the Random Forest Classifier. The random forest classifier builds multiple decision trees and merges them together. Review the sklearn [online documentation for the RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html). For this task: 1. Perform a 10-fold cross-validation strategy to see how well the random forest classifier performs with the iris data 2. Use a boxplot to show the distribution of accuracy 3. Use the `fit` and `predict` functions to see how well it performs with the testing data. 4. Plot the confusion matrix 5. Print the classification report. 
``` iris = sns.load_dataset('iris') X = iris.loc[:,'sepal_length':'petal_width'].values Y = iris['species'].values X = preprocessing.robust_scale(X) Xt, Xv, Yt, Yv = model_selection.train_test_split(X, Y, test_size=0.2, random_state=10) kfold = model_selection.KFold(n_splits=10, random_state=10) results = { 'LogisticRegression' : np.zeros(10), 'LinearDiscriminantAnalysis' : np.zeros(10), 'KNeighborsClassifier' : np.zeros(10), 'DecisionTreeClassifier' : np.zeros(10), 'GaussianNB' : np.zeros(10), 'SVC' : np.zeros(10), 'RandomForestClassifier': np.zeros(10) } results # Create the LogisticRegression object prepared for a multinomial outcome validation set. alg = RandomForestClassifier() # Execute the cross-validation strategy results['RandomForestClassifier'] = model_selection.cross_val_score(alg, Xt, Yt, cv=kfold, scoring="accuracy", error_score=np.nan) # Take a look at the scores for each of the 10-fold runs. results['RandomForestClassifier'] pd.DataFrame(results).plot(kind="box", rot=90); # Create the LinearDiscriminantAnalysis object with defaults. alg = RandomForestClassifier() # Create a new model using all of the training data. alg.fit(Xt, Yt) # Using the testing data, predict the iris species. predictions = alg.predict(Xv) # Let's see the predictions predictions accuracy_score(Yv, predictions) labels = ['versicolor', 'virginica', 'setosa'] cm = confusion_matrix(Yv, predictions, labels=labels) print(cm) ```
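The matrix above is only printed; a short sketch for plotting it (task 4) and printing the classification report (task 5), assuming the `cm`, `labels`, `Yv` and `predictions` objects created above:

```
# Plot the confusion matrix computed above and print the classification report
fig, ax = plt.subplots(figsize=(4, 4))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
            xticklabels=labels, yticklabels=labels, ax=ax)
ax.set_xlabel('Predicted')
ax.set_ylabel('True')
plt.show()

print(classification_report(Yv, predictions, labels=labels))
```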
# Time handling Last year in this course, people asked: "how do you handle times?" That's a good question... ## Exercise What is the ambiguity in these cases? 1. Meet me for lunch at 12:00 2. The meeting is at 14:00 3. How many hours are between 01:00 and 06:00 (in the morning) 4. When does the new year start? Local times are a *political* construction and subject to change. They differ depending on where you are. Human times are messy. If you try to do things with human times, you can expect to be sad. But still, *actual* time advances at the same rate all over the world (excluding relativity). There *is* a way to do this. ## What are timezones? A timezone specifies a certain *local time* at a certain location on earth. If you specify a timestamp such as 14:00 on 1 October 2019, it is **naive** if it does not include a timezone. Dependon on where you are standing, you can experience this timestamp at different times. If it include a timezone, it is **aware**. An aware timestamp exactly specifies a certain time across the whole world (but depending on where you are standing, your localtime may be different). **UTC** (coordinated universal time) is a certain timezone - the basis of all other timezones. Unix computers have a designated **localtime** timezone, which is used by default to display things. This is in the `TZ` environment variable. The **tz database** (or zoneinfo) is a open source, comprehensive, updated catalog of all timezones across the whole planet since 1970. It contains things like `EET`, `EEST`, but also geographic locations like `Europe/Helsinki` because the abbreviations can change. [Wikipedia](https://en.wikipedia.org/wiki/Tz_database) and [list of all zones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones). ## unixtime Unixtime is zero at 00:00 on 1 January 1970, and increases at a rate of one per second. This definition defines a single unique time everywhere in the world. You can find unixtime with the `date +%s` command: ``` !date +%s ``` You can convert from unixtime to real (local) time using the date command again ``` !date -d @1234567890 ``` There are functions which take (unixtime + timezone) and produce the timestamp (year, month, day, hour, minute, second). And vice versa. Unix time has two main benefits: * Un-ambiguous: defines a single time * You can do math on the times and compute differences, add time, etc, and it just works. ## Recommendations When you have times, always store them in unixtime in numerical format. When you need a human time (e.g. "what hour was this time"), you use a function to compute that property *in a given timezone*. If you store the other time components, for example hour and minute, this is just for convenience and you should *not* assume that you can go back to the unixtime to do math. [Richard's python time reference](http://rkd.zgib.net/wiki/DebianNotes/PythonTime) is the only comprehensive cataloging of Python that he knows of. ## Exercises To do these, you have to search for the functions yourself. ### 1. Convert this unixtime to localtime in Helsinki ``` ts = 1570078806 ``` ### 2. Convert the same time to UTC ### Convert that unixtime to a pandas `Timestamp` You'll need to search the docs some... ## Localization and conversion If you are given a time like "14:00 1 October 2019", and you want to convert it to a different timezone, can you? No, because there is no timezone already. You have to **localize** it by applying a timezone, then you can convert. 
``` import pytz tz = pytz.timezone("Asia/Tokyo") tz # Make a timestamp from a real time. We dont' know when this is... import pandas as pd import datetime dt = pd.Timestamp(datetime.datetime(2019, 10, 1, 14, 0)) dt dt.timestamp() # Localize it - interpert it as a certain timezone localized = dt.tz_localize(tz) localized dt.timestamp() converted = localized.tz_convert(pytz.timezone('Europe/Helsinki')) converted ``` And we notice it does the conversion... if we don't localize first, then this doesn't work. ## Exercises ### 1. Convert this timestamp to a pandas timestamp in Europe/Helsinki and Asia/Tokyo ``` ts = 1570078806 ``` ### Print the day of the year and hour of this unixtime ## From the command line ``` !date !date -d "15:00" !date -d "15:00 2019-10-31" !date -d "15:00 2019-10-31" +%s !date -d @1572526800 !TZ=America/New_York date -d @1572526800 !date -d '2019-10-01 14:00 CEST' ``` ## See also * Julian day - days since 1 January year 4713BCE, or Gregorian ordinal - days since 1 january year 1. Useful if you need to do date, instead of time, arithmetic. * [Richard's python-time reference](http://rkd.zgib.net/wiki/DebianNotes/PythonTime)
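Returning to the exercises above, one possible solution sketch (the zone names come from the tz database):

```
ts = 1570078806

# Build an aware pandas Timestamp from the unixtime, then convert between zones
t_utc = pd.Timestamp(ts, unit='s', tz='UTC')
print(t_utc.tz_convert('Europe/Helsinki'))
print(t_utc.tz_convert('Asia/Tokyo'))

# Day of year and hour of this unixtime (here shown in UTC)
print(t_utc.dayofyear, t_utc.hour)
```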
### Hyper Parameter Tuning One of the primary objective and challenge in machine learning process is improving the performance score, based on data patterns and observed evidence. To achieve this objective, almost all machine learning algorithms have specific set of parameters that needs to estimate from dataset which will maximize the performance score. The best way to choose good hyperparameters is through trial and error of all possible combination of parameter values. Scikit-learn provide GridSearch and RandomSearch functions to facilitate automatic and reproducible approach for hyperparameter tuning. ``` from IPython.display import Image Image(filename='../Chapter 4 Figures/Hyper_Parameter_Tuning.png', width=1000) ``` ### GridSearch ``` import pandas as pd import numpy as np from sklearn.preprocessing import StandardScaler from sklearn.cross_validation import train_test_split from sklearn import cross_validation from sklearn import metrics from matplotlib.colors import ListedColormap import matplotlib.pyplot as plt %matplotlib inline from sklearn.ensemble import RandomForestClassifier from sklearn.grid_search import GridSearchCV seed = 2017 # read the data in df = pd.read_csv("Data/Diabetes.csv") X = df.ix[:,:8].values # independent variables y = df['class'].values # dependent variables #Normalize X = StandardScaler().fit_transform(X) # evaluate the model by splitting into train and test sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=seed) kfold = cross_validation.StratifiedKFold(y=y_train, n_folds=5, random_state=seed) num_trees = 100 clf_rf = RandomForestClassifier(random_state=seed).fit(X_train, y_train) rf_params = { 'n_estimators': [100, 250, 500, 750, 1000], 'criterion': ['gini', 'entropy'], 'max_features': [None, 'auto', 'sqrt', 'log2'], 'max_depth': [1, 3, 5, 7, 9] } # setting verbose = 10 will print the progress for every 10 task completion grid = GridSearchCV(clf_rf, rf_params, scoring='roc_auc', cv=kfold, verbose=10, n_jobs=-1) grid.fit(X_train, y_train) print 'Best Parameters: ', grid.best_params_ results = cross_validation.cross_val_score(grid.best_estimator_, X_train,y_train, cv=kfold) print "Accuracy - Train CV: ", results.mean() print "Accuracy - Train : ", metrics.accuracy_score(grid.best_estimator_.predict(X_train), y_train) print "Accuracy - Test : ", metrics.accuracy_score(grid.best_estimator_.predict(X_test), y_test) ``` ### RandomSearch ``` from sklearn.model_selection import RandomizedSearchCV from scipy.stats import randint as sp_randint # specify parameters and distributions to sample from param_dist = {'n_estimators':sp_randint(100,1000), 'criterion': ['gini', 'entropy'], 'max_features': [None, 'auto', 'sqrt', 'log2'], 'max_depth': [None, 1, 3, 5, 7, 9] } # run randomized search n_iter_search = 20 random_search = RandomizedSearchCV(clf_rf, param_distributions=param_dist, cv=kfold, n_iter=n_iter_search, verbose=10, n_jobs=-1, random_state=seed) random_search.fit(X_train, y_train) # report(random_search.cv_results_) print 'Best Parameters: ', random_search.best_params_ results = cross_validation.cross_val_score(random_search.best_estimator_, X_train,y_train, cv=kfold) print "Accuracy - Train CV: ", results.mean() print "Accuracy - Train : ", metrics.accuracy_score(random_search.best_estimator_.predict(X_train), y_train) print "Accuracy - Test : ", metrics.accuracy_score(random_search.best_estimator_.predict(X_test), y_test) from bayes_opt import BayesianOptimization from sklearn.cross_validation import cross_val_score 
def rfccv(n_estimators, min_samples_split, max_features): return cross_val_score(RandomForestClassifier(n_estimators=int(n_estimators), min_samples_split=int(min_samples_split), max_features=min(max_features, 0.999), random_state=2017), X_train, y_train, 'f1', cv=kfold).mean() gp_params = {"alpha": 1e5} rfcBO = BayesianOptimization(rfccv, {'n_estimators': (100, 1000), 'min_samples_split': (2, 25), 'max_features': (0.1, 0.999)}) rfcBO.maximize(n_iter=10, **gp_params) print('RFC: %f' % rfcBO.res['max']['max_val']) ```
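The notebook above targets an older scikit-learn (with `sklearn.cross_validation` and `sklearn.grid_search`) and Python 2 print statements. If you are on a newer release where those modules moved into `sklearn.model_selection`, a rough equivalent of the grid search might look like the sketch below (an assumption about the newer API, reusing `X_train`, `y_train` and `seed` from above):

```
# Sketch: the same random-forest grid search with current scikit-learn modules
from sklearn.model_selection import StratifiedKFold, GridSearchCV, cross_val_score
from sklearn.ensemble import RandomForestClassifier

kfold_new = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)
rf_params = {
    'n_estimators': [100, 250, 500],
    'criterion': ['gini', 'entropy'],
    'max_depth': [3, 5, 7],
}
grid_new = GridSearchCV(RandomForestClassifier(random_state=seed),
                        rf_params, scoring='roc_auc', cv=kfold_new, n_jobs=-1)
grid_new.fit(X_train, y_train)

print('Best Parameters:', grid_new.best_params_)
print('Train CV accuracy:',
      cross_val_score(grid_new.best_estimator_, X_train, y_train, cv=kfold_new).mean())
```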