```
import pandas as pd
from lifelines import KaplanMeierFitter
import seaborn as sns
import matplotlib.pyplot as plt
preprints_df = pd.read_csv("output/biorxiv_article_metadata.tsv", sep="\t",)
preprints_df["date_received"] = pd.to_datetime(preprints_df["date_received"])
xml_df = (
preprints_df.sort_values(by="date_received")
.dropna(subset=["date_received"])
.groupby("doi")
.first()
)
api_df = pd.read_csv("output/biorxiv_published_api_data.tsv", sep="\t")
api_df[api_df["published_date"].str.contains(":")]
index = api_df[api_df["published_date"].str.contains(":")].index
api_df.loc[index, "published_date"] = (
api_df.loc[index, "published_date"].str.split(":").str[0]
)
for col in ["preprint_date", "published_date"]:
api_df[col] = pd.to_datetime(api_df[col])
api_df.set_index("biorxiv_doi")
merged_df = pd.merge(
xml_df,
api_df.set_index("biorxiv_doi"),
left_index=True,
right_index=True,
how="outer",
)
merged_df
merged_df["document"].isna().sum()
merged_df["published_doi"].isna().sum()
len(merged_df)
# let's ignore papers we don't have XMLs for
merged_df = pd.merge(
xml_df,
api_df.set_index("biorxiv_doi"),
left_index=True,
right_index=True,
how="left",
)
merged_df["published"] = ~merged_df["published_doi"].isna()
# I should change this to when the data was pulled, but I didn't record that for now :(
merged_df.loc[merged_df["published"], "observation_date"] = merged_df.loc[
merged_df["published"], "published_date"
]
merged_df.loc[~merged_df["published"], "observation_date"] = pd.Timestamp.today()
merged_df["observation_duration"] = (
merged_df["observation_date"] - merged_df["date_received"]
)
(merged_df["observation_duration"] < pd.Timedelta(0)).sum()
merged_df = merged_df[merged_df["observation_duration"] > pd.Timedelta(0)]
ax = sns.distplot(
merged_df["observation_duration"].dt.total_seconds() / 60 / 60 / 24 / 365
)
kmf = KaplanMeierFitter()
kmf.fit(
merged_df["observation_duration"].dt.total_seconds() / 60 / 60 / 24 / 365,
event_observed=merged_df["published"],
)
ax = kmf.plot(label="all papers", logx=True)
_ = ax.set_ylabel("proportion of unpublished biorxiv papers")
_ = ax.set_xlabel("timeline (years)")
_ = ax.set_ylim(0, 1)
f = plt.figure(figsize=(10, 8))
ax = None
for category, cat_group in merged_df.groupby("category"):
kmf.fit(
cat_group["observation_duration"].dt.total_seconds() / 60 / 60 / 24 / 365,
event_observed=cat_group["published"],
)
ax = kmf.plot(label=category, ax=ax, ci_show=False, logx=True)
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
_ = ax.legend(loc="center left", bbox_to_anchor=(1, 0.5), title="Biorxiv category")
_ = ax.set_ylabel("proportion of unpublished biorxiv papers")
_ = ax.set_xlabel("timeline (years)")
_ = ax.set_ylim(0, 1)
merged_df["doi_prefix"] = merged_df["published_doi"].str.split("/").str[0]
%%time
f = plt.figure(figsize=(10, 8))
ax = None
for category, cat_group in merged_df.groupby("doi_prefix"):
if len(cat_group) > 100:
kmf.fit(
cat_group["observation_duration"].dt.total_seconds() / 60 / 60 / 24 / 365,
event_observed=cat_group["published"],
)
ax = kmf.plot(label=category, ax=ax, ci_show=False, logx=True)
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
_ = ax.legend(loc="center left", bbox_to_anchor=(1, 0.5), title="DOI prefix")
_ = ax.set_ylabel("proportion of unpublished biorxiv papers")
_ = ax.set_xlabel("timeline (years)")
_ = ax.set_ylim(0, 1)
%%time
doi_prefix_df = merged_df.groupby("doi_prefix").apply(
lambda cat_group: pd.Series(
{
"count": len(cat_group),
"80th_percentile": kmf.fit(
cat_group["observation_duration"].dt.total_seconds() / 60 / 60 / 24,
event_observed=cat_group["published"],
).percentile(0.8),
}
)
)
doi_prefix_df[doi_prefix_df["count"] > 50].sort_values("80th_percentile").head()
```
F1000 Research Ltd <== 10.12688
MDPI AG <== 10.3390 - wikipedia notes questionable quality of peer-review
| github_jupyter |
# Use BlackJAX with Numpyro
BlackJAX can take any log-probability function as long as it is compatible with JAX's JIT. In this notebook we show how we can use Numpyro as a modeling language and BlackJAX as an inference library.
We reproduce the Eight Schools example from the [Numpyro documentation](https://github.com/pyro-ppl/numpyro) (all credit for the model goes to the Numpyro team). For this notebook to run you will need to install Numpyro:
```bash
pip install numpyro
```
```
import jax
import numpy as np
import numpyro
import numpyro.distributions as dist
from numpyro.infer.reparam import TransformReparam
from numpyro.infer.util import initialize_model
import blackjax
num_warmup = 1000
# We can use this notebook for simple benchmarking by setting
# RUN_BENCHMARK below to True and running it from the terminal:
# $ ipython examples/use_with_numpyro.ipynb
RUN_BENCHMARK = False
if RUN_BENCHMARK:
num_sample = 5_000_000
print(f"Benchmark with {num_warmup} warmup steps and {num_sample} sampling steps.")
else:
num_sample = 10_000
```
## Data
```
# Data of the Eight Schools Model
J = 8
y = np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0])
sigma = np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0])
```
## Model
We use the non-centered version of the model described towards the end of the README on Numpyro's repository:
```
# Eight Schools example - Non-centered Reparametrization
def eight_schools_noncentered(J, sigma, y=None):
mu = numpyro.sample("mu", dist.Normal(0, 5))
tau = numpyro.sample("tau", dist.HalfCauchy(5))
with numpyro.plate("J", J):
with numpyro.handlers.reparam(config={"theta": TransformReparam()}):
theta = numpyro.sample(
"theta",
dist.TransformedDistribution(
dist.Normal(0.0, 1.0), dist.transforms.AffineTransform(mu, tau)
),
)
numpyro.sample("obs", dist.Normal(theta, sigma), obs=y)
```
We need to translate the model into a log-probability function that will be used by BlackJAX to perform inference. For that we use the `initialize_model` function in Numpyro's internals. We will also use the initial position it returns:
```
rng_key = jax.random.PRNGKey(0)
init_params, potential_fn_gen, *_ = initialize_model(
rng_key,
eight_schools_noncentered,
model_args=(J, sigma, y),
dynamic_args=True,
)
```
Now we create the potential using the `potential_fn_gen` provided by Numpyro and initialize the NUTS state with BlackJAX:
```
if RUN_BENCHMARK:
print("\nBlackjax:")
print("-> Running warmup.")
```
We now run the window adaptation in BlackJAX:
```
%%time
initial_position = init_params.z
logprob = lambda position: -potential_fn_gen(J, sigma, y)(position)
adapt = blackjax.window_adaptation(
blackjax.nuts, logprob, num_warmup, target_acceptance_rate=0.8
)
last_state, kernel, _ = adapt.run(rng_key, initial_position)
```
Let us now perform inference using the previously computed step size and inverse mass matrix. We also time the sampling to give you an idea of how fast BlackJAX can be on simple models:
```
if RUN_BENCHMARK:
print("-> Running sampling.")
%%time
def inference_loop(rng_key, kernel, initial_state, num_samples):
@jax.jit
def one_step(state, rng_key):
state, info = kernel(rng_key, state)
return state, (state, info)
keys = jax.random.split(rng_key, num_samples)
_, (states, infos) = jax.lax.scan(one_step, initial_state, keys)
return states, (
infos.acceptance_probability,
infos.is_divergent,
infos.integration_steps,
)
# Sample from the posterior distribution
states, infos = inference_loop(rng_key, kernel, last_state, num_sample)
_ = states.position["mu"].block_until_ready()
```
Let us compute the average acceptance probability and check the number of divergences (to make sure that the model sampled correctly, and that the sampling time is not a result of a majority of divergent transitions):
```
acceptance_rate = np.mean(infos[0])
num_divergent = np.mean(infos[1])
print(f"\nAcceptance rate: {acceptance_rate:.2f}")
print(f"{100*num_divergent:.2f}% divergent transitions")
```
Let us now plot the distribution of the parameters. Note that since we use a transformed variable, Numpyro does not output the school treatment effect directly:
```
if not RUN_BENCHMARK:
import seaborn as sns
from matplotlib import pyplot as plt
samples = states.position
fig, axes = plt.subplots(ncols=2)
fig.set_size_inches(12, 5)
sns.kdeplot(samples["mu"], ax=axes[0])
sns.kdeplot(samples["tau"], ax=axes[1])
axes[0].set_xlabel("mu")
axes[1].set_xlabel("tau")
fig.tight_layout()
if not RUN_BENCHMARK:
fig, axes = plt.subplots(8, 2, sharex="col", sharey="col")
fig.set_size_inches(12, 10)
for i in range(J):
axes[i][0].plot(samples["theta_base"][:, i])
axes[i][0].title.set_text(f"School {i} relative treatment effect chain")
sns.kdeplot(samples["theta_base"][:, i], ax=axes[i][1], shade=True)
axes[i][1].title.set_text(f"School {i} relative treatment effect distribution")
axes[J - 1][0].set_xlabel("Iteration")
axes[J - 1][1].set_xlabel("School effect")
fig.tight_layout()
plt.show()
if not RUN_BENCHMARK:
for i in range(J):
print(
f"Relative treatment effect for school {i}: {np.mean(samples['theta_base'][:, i]):.2f}"
)
```
## Compare sampling time with Numpyro
We compare the time BlackJAX takes to run the warmup (1,000 iterations) and draw the samples against Numpyro's:
```
from numpyro.infer import MCMC, NUTS
if RUN_BENCHMARK:
print("\nNumpyro:")
print("-> Running warmup+sampling.")
%%time
nuts_kernel = NUTS(eight_schools_noncentered, target_accept_prob=0.8)
mcmc = MCMC(
nuts_kernel, num_warmup=num_warmup, num_samples=num_sample, progress_bar=False
)
rng_key = jax.random.PRNGKey(0)
mcmc.run(rng_key, J, sigma, y=y, extra_fields=("num_steps", "accept_prob"))
samples = mcmc.get_samples()
_ = samples["mu"].block_until_ready()
print(f"\nAcceptance rate: {mcmc.get_extra_fields()['accept_prob'].mean():.2f}")
print(f"{100*mcmc.get_extra_fields()['diverging'].mean():.2f}% divergent transitions")
print(f"\nBlackjax average {infos[2].mean():.2f} leapfrog per iteration.")
print(
f"Numpyro average {mcmc.get_extra_fields()['num_steps'].mean():.2f} leapfrog per iteration."
)
```
| github_jupyter |
# 1. Import libraries
```
#----------------------------Reproducible----------------------------------------------------------------------------------------
import numpy as np
import tensorflow as tf
import random as rn
import os
seed=0
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
rn.seed(seed)
#session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
session_conf =tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
#tf.set_random_seed(seed)
tf.compat.v1.set_random_seed(seed)
#sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)
K.set_session(sess)
#----------------------------Reproducible----------------------------------------------------------------------------------------
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#--------------------------------------------------------------------------------------------------------------------------------
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
%matplotlib inline
matplotlib.style.use('ggplot')
import random
import scipy.sparse as sparse
import scipy.io
from keras.utils import to_categorical
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from skfeature.function.similarity_based import lap_score
from skfeature.utility import construct_W
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LinearRegression
import time
import pandas as pd
def mse_check(train, val):
LR = LinearRegression(n_jobs = -1)
LR.fit(train[0], train[1])
MSELR = ((LR.predict(val[0]) - val[1]) ** 2).mean()
return MSELR
def next_batch(samples, labels, num):
# Return a total of `num` random samples and labels.
idx = np.random.choice(len(samples), num)
return samples[idx], labels[idx]
def standard_single_hidden_layer_autoencoder(X, units, O):
reg_alpha = 1e-3
D = X.shape[1]
weights = tf.get_variable("weights", [D, units])
biases = tf.get_variable("biases", [units])
X = tf.matmul(X, weights) + biases
X = tf.layers.dense(X, O, kernel_regularizer = tf.contrib.layers.l2_regularizer(reg_alpha))
return X, weights
def aefs_subset_selector(train, K, epoch_num=1000, alpha=0.1):
D = train[0].shape[1]
O = train[1].shape[1]
learning_rate = 0.001
tf.reset_default_graph()
X = tf.placeholder(tf.float32, (None, D))
TY = tf.placeholder(tf.float32, (None, O))
Y, weights = standard_single_hidden_layer_autoencoder(X, K, O)
loss = tf.reduce_mean(tf.square(TY - Y)) + alpha * tf.reduce_sum(tf.sqrt(tf.reduce_sum(tf.square(weights), axis=1)), axis=0) + tf.losses.get_total_loss()
train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
init = tf.global_variables_initializer()
batch_size = 8
batch_per_epoch = train[0].shape[0] // batch_size
costs = []
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = False
with tf.Session(config = session_config) as sess:
sess.run(init)
for ep in range(epoch_num):
cost = 0
for batch_n in range(batch_per_epoch):
imgs, yimgs = next_batch(train[0], train[1], batch_size)
_, c, p = sess.run([train_op, loss, weights], feed_dict = {X: imgs, TY: yimgs})
cost += c / batch_per_epoch
costs.append(cost)
return list(np.argmax(np.abs(p), axis=0)), costs
def AEFS(train, test, K, debug = True):
x_train, x_val, y_train, y_val = train_test_split(train[0], train[1], test_size = 0.1)
print("y_train.shape",y_train.shape)
bindices = []
bmse = 1e100
for alpha in [1e-3, 1e-1, 1e1, 1e3]:
print("alpha",alpha)
        indices, _ = aefs_subset_selector(train, K, alpha=alpha)
mse = mse_check((train[0][:, indices], train[1]), (x_val[:, indices], y_val))
if bmse > mse:
bmse = mse
bindices = indices
if debug:
print(bindices, bmse)
return train[0][:, bindices], test[0][:, bindices]
#--------------------------------------------------------------------------------------------------------------------------------
def ETree(p_train_feature,p_train_label,p_test_feature,p_test_label,p_seed):
clf = ExtraTreesClassifier(n_estimators=50, random_state=p_seed)
# Training
clf.fit(p_train_feature, p_train_label)
# Training accuracy
print('Training accuracy:',clf.score(p_train_feature, np.array(p_train_label)))
print('Training accuracy:',accuracy_score(np.array(p_train_label),clf.predict(p_train_feature)))
#print('Training accuracy:',np.sum(clf.predict(p_train_feature)==np.array(p_train_label))/p_train_label.shape[0])
# Testing accuracy
print('Testing accuracy:',clf.score(p_test_feature, np.array(p_test_label)))
print('Testing accuracy:',accuracy_score(np.array(p_test_label),clf.predict(p_test_feature)))
#print('Testing accuracy:',np.sum(clf.predict(p_test_feature)==np.array(p_test_label))/p_test_label.shape[0])
#--------------------------------------------------------------------------------------------------------------------------------
def write_to_csv(p_data,p_path):
dataframe = pd.DataFrame(p_data)
dataframe.to_csv(p_path, mode='a',header=False,index=False,sep=',')
```
# 2. Loading data
```
data_path="./Dataset/Prostate_GE.mat"
Data = scipy.io.loadmat(data_path)
data_arr=Data['X']
label_arr=Data['Y'][:, 0]-1
Data=MinMaxScaler(feature_range=(0,1)).fit_transform(data_arr)
C_train_x,C_test_x,C_train_y,C_test_y= train_test_split(Data,label_arr,test_size=0.2,random_state=seed)
print('Shape of C_train_x: ' + str(C_train_x.shape))
print('Shape of C_train_y: ' + str(C_train_y.shape))
print('Shape of C_test_x: ' + str(C_test_x.shape))
print('Shape of C_test_y: ' + str(C_test_y.shape))
key_feture_number=64
```
# 3. Model
```
train=(C_train_x,C_train_x)
test=(C_test_x,C_test_x)
start = time.perf_counter()
C_train_selected_x, C_test_selected_x = AEFS((train[0], train[0]), (test[0], test[0]), key_feture_number)
time_cost = time.perf_counter() - start
write_to_csv(np.array([time_cost]),"./log/AEFS_time"+str(key_feture_number)+".csv")
```
# 4. Classifying
### Extra Trees
```
train_feature=C_train_x
train_label=C_train_y
test_feature=C_test_x
test_label=C_test_y
print('Shape of train_feature: ' + str(train_feature.shape))
print('Shape of train_label: ' + str(train_label.shape))
print('Shape of test_feature: ' + str(test_feature.shape))
print('Shape of test_label: ' + str(test_label.shape))
p_seed=seed
ETree(train_feature,train_label,test_feature,test_label,p_seed)
train_feature=C_train_selected_x
train_label=C_train_y
test_feature=C_test_selected_x
test_label=C_test_y
print('Shape of train_feature: ' + str(train_feature.shape))
print('Shape of train_label: ' + str(train_label.shape))
print('Shape of test_feature: ' + str(test_feature.shape))
print('Shape of test_label: ' + str(test_label.shape))
p_seed=seed
ETree(train_feature,train_label,test_feature,test_label,p_seed)
```
# 5. Reconstruction loss
```
from sklearn.linear_model import LinearRegression
def mse_check(train, test):
LR = LinearRegression(n_jobs = -1)
LR.fit(train[0], train[1])
MSELR = ((LR.predict(test[0]) - test[1]) ** 2).mean()
return MSELR
train_feature_tuple=(C_train_selected_x,C_train_x)
test_feature_tuple=(C_test_selected_x,C_test_x)
reconstruction_loss=mse_check(train_feature_tuple, test_feature_tuple)
print(reconstruction_loss)
```
| github_jupyter |
```
import h5py
import numpy as np
files = ['../Data/ModelNet40_train/ply_data_train0.h5',
'../Data/ModelNet40_train/ply_data_train1.h5',
'../Data/ModelNet40_train/ply_data_train2.h5',
'../Data/ModelNet40_train/ply_data_train3.h5',
'../Data/ModelNet40_train/ply_data_train4.h5']
#files = ['../Data/ModelNet10_train/modelnet10_train.h5']
d = []
l = []
for i in range(len(files)):
    fh5 = h5py.File(files[i], 'r')
data = fh5['data'][:]
label = fh5['label'][:]
fh5.close()
if(i != 0):
d = np.append(d, data, axis=0)
l = np.append(l, label, axis=0)
else:
d = data
l = label
print(d.shape)
print(l.shape)
import matplotlib.pyplot as plt
plt.hist(l, bins=100)
plt.show()
from keras.utils import to_categorical
Y_train = to_categorical(l)
classes = Y_train.shape[1]
print(Y_train.shape)
print("Loaded dataset with %s classes" % classes)
from tqdm import trange
# now we need to voxelize that point cloud...
def voxelize(dim, data):
# uncomment below if you have not already normalized your object to [0,1]^3
#m = max(x.min(), x.max(), key=abs)
#data /= m # This puts the data in [0,1]
data *= (dim/2) # This puts the data in [0,dim]
data += (dim/2)
data = np.asarray([[int(i[0]), int(i[1]), int(i[2])] for i in data])
    data = np.unique(data, axis=0)  # drop duplicate voxel coordinates (unique rows)
retval = np.zeros((dim, dim, dim))
for i in data:
retval[i[0]][i[1]][i[2]] = 1
retval = np.asarray([retval])
return retval
X_train = [voxelize(32, i) for i in d]
X_train = np.asarray(X_train)
X_train = np.reshape(X_train, (-1, 32, 32, 32, 1))
print(X_train.shape)
files = ['../Data/ModelNet40_test/ply_data_test0.h5',
'../Data/ModelNet40_test/ply_data_test1.h5']
d = []
l = []
for i in range(len(files)):
    fh5 = h5py.File(files[i], 'r')
data = fh5['data'][:]
label = fh5['label'][:]
fh5.close()
if(i != 0):
d = np.append(d, data, axis=0)
l = np.append(l, label, axis=0)
else:
d = data
l = label
print(d.shape)
print(l.shape)
Y_test = to_categorical(l)
X_test = [voxelize(32, i) for i in d]
X_test = np.asarray(X_test)
X_test = np.reshape(X_test, (-1, 32, 32, 32, 1))
import keras
from keras import backend as K
from keras.models import Sequential
from keras.layers import Convolution3D, MaxPooling3D
from keras.layers import Conv3D
from keras.layers.core import Activation, Dense, Dropout, Flatten
from keras.layers.advanced_activations import LeakyReLU
from keras.regularizers import l2
from keras.callbacks import LearningRateScheduler, ModelCheckpoint
from keras.optimizers import SGD
import random
import numpy as np
num_classes = classes
# Defining VoxNet in Keras 2
model = Sequential()
model.add(Conv3D(input_shape=(32, 32, 32, 1), filters=32,
kernel_size=(5,5,5), strides=(2, 2, 2)))
model.add(Activation(LeakyReLU(alpha=0.1)))
model.add(Dropout(rate=0.3))
model.add(Conv3D(filters=32, kernel_size=(3,3,3)))
model.add(Activation(LeakyReLU(alpha=0.1)))
model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=None))
model.add(Dropout(rate=0.4))
model.add(Flatten())
model.add(Dense(units=128, activation='relu'))
model.add(Dropout(rate=0.5))
model.add(Dense(units=num_classes, kernel_initializer='normal', activation='relu'))
model.add(Activation("softmax"))
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=["accuracy"])
model.summary()
history = model.fit(x=X_train, y=Y_train, batch_size=16,
epochs=25, verbose=1, validation_data=(X_test, Y_test))
# serialize model to JSON
from keras.models import model_from_json
import os
#model_json = model.to_json()
#with open("voxnet40.json", "w") as json_file:
# json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("VoxNet-ModelNet40.h5")
print("Saved model to disk")
```
| github_jupyter |
# Sorting
### 1. Bubble: $O(n^2)$
Repeatedly swaps adjacent elements that are in the wrong order.
### 2. Selection: $O(n^2)$
Finds the largest remaining element and places it in its correct position.
### 3. Insertion: $O(n^2)$ (see the sketch below)
### 4. Shell: $O(n^2)$
### 5. Merge: $O(n \log n)$
### 6. Quick: $O(n \log n)$
Choosing a proper pivot is important.
### 7. Counting: $O(n)$
### 8. Radix: $O(n)$
### 9. Bucket: $O(n)$
---
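Insertion sort appears in the list above but is never implemented in this notebook; the cell below is a minimal sketch for reference (the function name `insertion` and the test values are mine, not from the original notebook).
```
def insertion(arr):
    for i in range(1, len(arr)):
        key = arr[i]
        j = i - 1
        # shift elements larger than key one slot to the right, then drop key into place
        while j >= 0 and arr[j] > key:
            arr[j + 1] = arr[j]
            j -= 1
        arr[j + 1] = key

# test code
arr = [54, 26, 93, 17, 77, 31, 44, 55, 20]
insertion(arr)
print(arr)
```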
# Bubble
```
def bubble(arr):
n = len(arr)
for i in range(n):
        # (n-1)-(i): the (i+1)-th index from the end
        # pass 0 -> the cursor moves up to index n-1
        # pass 1 -> the cursor moves up to index n-1-1
for j in range(0, (n-1)-i):
print(j)
if arr[j] > arr[j+1]:
arr[j], arr[j+1] = arr[j+1], arr[j]
def bubble(arr):
    # bubble sort again, without the debug print
    n = len(arr)
    for i in range(n):
        for j in range(0, n-1-i):
            if arr[j] > arr[j+1]:
                arr[j], arr[j+1] = arr[j+1], arr[j]
arr = [64, 34, 25, 12, 22, 11, 90]
bubble(arr)
arr
def bubble2(arr):
n = len(arr)
for i in range(n):
swapped = False
for j in range(0, n-1-i):
if arr[j] > arr[j+1]:
arr[j], arr[j+1] = arr[j+1], arr[j]
                # a swap happened, so part of the array was still unsorted
swapped = True
if swapped == False:
break
def b(arr):
n = len(arr)
for i in range(n):
swapped = False
for j in range(0, n-1-i):
if arr[j] > arr[j+1]:
swapped = True
arr[j], arr[j+1] = arr[j+1], arr[j]
if swapped == False:
return
```
# Selection Sorting
```
def Selection(arr):
n = len(arr)
for i in range(n-1, 0, -1):
positionOfMax=0
for loc in range(1, i+1):
if arr[loc] > arr[positionOfMax]:
positionOfMax = loc
        arr[i], arr[positionOfMax] = arr[positionOfMax], arr[i]
# test code
arr = [54,26,93,17,77,31,44,55,20]
Selection(arr)
print(arr)
```
# Quick
```
# partition: cur traverses from low up to high-1, moving elements <= pivot to the front
def partition(arr, low, high):
i = low - 1
pivot = arr[high]
for cur in range(low, high):
print(cur, i)
if arr[cur] <= pivot:
i += 1
arr[i], arr[cur] = arr[cur], arr[i]
arr[i+1], arr[high] = arr[high], arr[i+1]
return i+1
def QuickSort(arr, low, high):
if low < high:
pi = partition(arr, low, high)
        # first half
QuickSort(arr, low, pi-1)
        # second half
QuickSort(arr, pi+1, high)
# test code
arr = [10, 7, 8, 9, 1, 5]
n = len(arr)
QuickSort(arr, 0, n-1)
for i in range(n):
print(arr[i])
```
# Quick2
```
def partition(arr, start, end):
    pivot = arr[start]
    i = start + 1
    j = end - 1
while True:
# i: traverse from begin
# j: traverse from end
# if arr[i](left side of pivot) smaller than pivot, then pass
while (i <= j and arr[i] <= pivot):
i += 1
# if arr[j](right side of pivot) larger than pivot, then pass
while (i <= j and arr[j] >= pivot):
j -= 1
if i <= j:
arr[i], arr[j] = arr[j], arr[i]
print(start)
        # if i and j have crossed, swap the pivot (at the front) with the rightmost value of the left partition
else:
arr[start], arr[j] = arr[j], arr[start]
return j
def quicksort(arr, start, end):
if end - start > 1:
# p: pivot location
p = partition(arr, start, end)
quicksort(arr, start=start, end=p)
quicksort(arr, start=p+1, end=end)
```
# Counting Sort
- reference: https://www.geeksforgeeks.org/radix-sort/
- count_arr: count how many each of 0,1,2,...,n is in arr
- iter 0, 1, ..., n
- fill ans with 0, 1, ..., n
```
# the key is building the counting array
# then iterate according to each count
def counting_sort(arr, max_val):
count_arr = [0 for _ in range(max_val)]
for num in arr:
count_arr[num] += 1
i = 0
for num in range(max_val):
iter_n = count_arr[num]
for _ in range(iter_n):
arr[i] = num
i += 1
return arr
# test code
arr = [5,1,5,1,1,2,4,3,4,3,2]
max_val = 6
counting_sort(arr, max_val)
```
# Radix Sort
## Key idea
- `number // exp % 10`, where `exp` selects the desired digit (1st digit: 1, 2nd digit: 10, ...)
- `// 10^(digit-1)`: shifts the desired digit into the last position
- e.g. to bring 9, the third digit from the end of 25948, into the last position: 25948 // 10^(3-1) = 259
- `% 10`: keeps only that last digit
```
4378 // 10**(4-1) % 10
import numpy as np

def SortingByDigit(arr, exp):
n = len(arr)
output = [0 for _ in range(n)]
count = [0 for _ in range(10)]
for num in arr:
last_digit = num // exp % 10
count[last_digit] += 1
i = 1
    while i < 10:
count[i] += count[i-1]
i += 1
print('digit:', np.log10(exp)+1)
print(count)
    # why iterate backwards? it keeps the sort stable, which matters when later passes arrange by larger digits
i = n-1
while i >= 0:
last_digit = (arr[i] // exp) % 10
idx_by_cum = count[last_digit]
output[idx_by_cum - 1] = arr[i]
count[last_digit] -= 1
i -= 1
print(count)
# update arr
i = 0
for i in range(0,len(arr)):
arr[i] = output[i]
# arr = [i for i in output]
print(arr)
print()
def radixSort(arr):
max_ = max(arr)
exp = 1
while (max_ // exp) > 0:
print(max_, exp)
SortingByDigit(arr, exp)
exp *= 10
# test code
arr = [170, 5145, 3145, 2145, 802, 24]
radixSort(arr)
```
| github_jupyter |
# Physically labeled data: pyfocs single-ended examples
Finally, after all of that (probably confusing) work we can map the data to physical coordinates.
```
import xarray as xr
import pyfocs
import os
```
# 1. Load data
## 1.1 Configuration files
As in the previous example we will load and prepare the configuration files. This time we will load all the configuration files.
Physically labeled data is triggered by setting the below flag within the configuration file.
```python
final_flag = True
```
```
dir_example = os.path.join('../tests/data/')
# Grab a configuration file for the twisted pair pvc fiber and for the stainless steel fiber
config_names = [
'example_configuration_steelfiber.yml',
'example_twistedpair_bothwls.yml',
'example_twistedpair_p1wls.yml',
'example_twistedpair_p2wls.yml',
]
cfg_fname = os.path.join(dir_example, config_names[0])
cfg_ss, lib_ss = pyfocs.check.config(cfg_fname, ignore_flags=True)
cfg_fname = os.path.join(dir_example, config_names[1])
cfg_both, lib_both = pyfocs.check.config(cfg_fname, ignore_flags=True)
cfg_fname = os.path.join(dir_example, config_names[2])
cfg_p1, lib_p1 = pyfocs.check.config(cfg_fname, ignore_flags=True)
cfg_fname = os.path.join(dir_example, config_names[3])
cfg_p2, lib_p2 = pyfocs.check.config(cfg_fname, ignore_flags=True)
```
## 1.2 Data
- In this case we only use a single twisted pair, p1, since it is closer to the DTS device in LAF space yielding a less noisy signal.
- Additionally, we will load the paired heated-unheated stainless steel fiber that has been interpolated to a common spatial index.
```
ds_p1 = xr.open_dataset(os.path.join(dir_example, 'multifiledemo', 'final', 'multifiledemo_final_20190722-0000_p1-wls_unheated.nc'))
ds_p2 = xr.open_dataset(os.path.join(dir_example, 'multifiledemo', 'final', 'multifiledemo_final_20190722-0000_p2-wls_unheated.nc'))
ds_cold = xr.open_dataset(os.path.join(dir_example, 'multifiledemo', 'final', 'multifiledemo_final_20190722-0000_ss-wls_unheated.nc'))
ds_heat = xr.open_dataset(os.path.join(dir_example, 'multifiledemo', 'final', 'multifiledemo_final_20190722-0000_ss-wls_heated.nc'))
print('=================')
print('Unheated fibers - Twisted PVC fiber, pair 1')
print(ds_p1)
print('')
print('=================')
print('Unheated fibers - Twisted PVC fiber, pair 2')
print(ds_p2)
print('')
print('=================')
print('Unheated fibers - stainless steel')
print(ds_cold)
print('')
print('=================')
print('Heated fibers - stainless steel')
print(ds_heat)
print('')
```
Here we see that all datasets now have `x`, `y`, and `z` coordinates which are labeled using the `xyz` multiindex. Other quantities have been dropped.
The netcdf files are also now labeled differently. Channel information has been excluded and there is now a label on the location type at the end of the file name.
# 2. Calculate wind speed
## 2.1 Construct the power variable
Here I will construct a data variable of power. The details of what is happening here are not important, other than that `power` is a data variable with a LAF dimension. The wind speed code can accept `power` as a DataArray with dimensions shared with `cal_temp` or as a single float.
```
import numpy as np
power_loc = {
'1': [1892.5, 2063.5],
'2': [2063.5, 2205.5],
'3': [2207.0, 2361.],
'4': [2361., 2524.]}
power_vals = {
'1': 6.1,
'2': 6.4,
'3': 4.7,
'4': 5.4,}
ds_heat['power'] = ('LAF', np.zeros_like(ds_heat.LAF))
for p in power_vals:
laf_mask = ((ds_heat.LAF > power_loc[p][0]) & (ds_heat.LAF < power_loc[p][1]))
ds_heat['power'] = xr.where(laf_mask, np.ones_like(ds_heat.LAF.values) * power_vals[p], ds_heat.power.values)
```
## 2.2 Calculate wind speed
```
wind_speed = pyfocs.wind_speed.calculate(ds_heat.cal_temp, ds_cold.cal_temp, ds_heat.power)
```
## 2.3 Split up wind speed based on fiber orientation
Wind speed is most efficiently measured in the direction orthogonal to the fiber. Since our fibers are orthogonal to each other, we effectively measured wind in two different directions. We represent that here by combining sections that are parallel to each other.
```
cross_valley_components = ['OR_SE', 'OR_NW']
logic = [wind_speed.unheated == l for l in cross_valley_components]
logic = xr.concat(logic, dim='locations').any(dim='locations')
wind_speed_cross_valley = wind_speed.where(logic, drop=True)
along_valley_components = ['OR_SW2', 'OR_SW1', 'OR_NE1', 'OR_NE2']
logic = [wind_speed.unheated == l for l in along_valley_components]
logic = xr.concat(logic, dim='locations').any(dim='locations')
wind_speed_along_valley = wind_speed.where(logic, drop=True)
```
## 2.4 Create a Dataset that contains all unheated data
```
unheated = xr.concat([ds_cold, ds_p1], dim='xyz', coords='different')
```
# 3. Plot your Fiber Optic Distributed Sensing data
## 3.1 Wind speed and temperature
```
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(12, 6),)
spec = fig.add_gridspec(ncols=4,
nrows=2,
width_ratios=[1, 0.08, 0.04, 0.08],
hspace=0.18, wspace=0.25,
)
ax_ew_cbar = fig.add_subplot(spec[0, 3])
ax_ns_cbar = fig.add_subplot(spec[1, 3])
ax_t_cbar = fig.add_subplot(spec[:, 1])
ax_temp = fig.add_subplot(spec[:, 0])
im = ax_temp.scatter(unheated.x, unheated.y, s=10,
c=unheated.mean(dim='time').cal_temp.values,
cmap='viridis', vmin=8.5, vmax=10)
ax_temp.set_ylabel('Relative Northing (m)')
ax_temp.set_xlabel('Relative Easting (m)')
plt.colorbar(im, cax=ax_t_cbar, extend='both')
ax_t_cbar.set_ylabel('Temperature (C)')
ax_temp.set_title('a) LOVE19 Outer Array', loc='left')
im = ax_temp.scatter(wind_speed_along_valley.x * 1.1,
wind_speed_along_valley.y * 1.1,
s=10,
c=wind_speed_along_valley.mean(dim='time').values,
cmap='Oranges', vmin=0.5, vmax=4)
plt.colorbar(im, cax=ax_ew_cbar, extend='max')
ax_ew_cbar.set_ylabel('Along valley wind (m/s)')
im = ax_temp.scatter(wind_speed_cross_valley.x * 1.1,
wind_speed_cross_valley.y * 1.1,
s=10,
c=wind_speed_cross_valley.mean(dim='time').values,
cmap='Blues', vmin=0.5, vmax=4)
plt.colorbar(im, cax=ax_ns_cbar, extend='max')
ax_ns_cbar.set_ylabel('Cross valley wind (m/s)')
```
## 3.2 Biases in space
```
ds_p2 = ds_p2.interp_like(ds_p1)
fig = plt.figure(figsize=(8, 6),)
spec = fig.add_gridspec(ncols=2,
nrows=1,
width_ratios=[1, 0.1],
hspace=0.18, wspace=0.25,
)
ax_t_cbar = fig.add_subplot(spec[:, 1])
ax_temp = fig.add_subplot(spec[:, 0])
im = ax_temp.scatter(
ds_p1.x,
ds_p1.y,
s=10,
c=(ds_p1.cal_temp - ds_p2.cal_temp).mean(dim='time').values,
cmap='RdBu', vmin=-0.5, vmax=0.5)
ax_temp.set_ylabel('Relative Northing (m)')
ax_temp.set_xlabel('Relative Easting (m)')
plt.colorbar(im, cax=ax_t_cbar, extend='both')
ax_t_cbar.set_ylabel('p1 - p2 (K)')
ax_temp.set_title('LOVE19 Twisted PVC Fiber Bias', loc='left')
```
Here we can see that the reference sections are a bit misleading. While they evaluate to effectively zero bias, there are substantial biases between what should be replicate measurements. We have found this to be typical of DTS observations. The cause and correction are a subject of ongoing research, but we highlight this as a final word of caution on DTS. The method is exceptionally powerful but is very far from a push-button operation. It requires a substantial investment of time at every step: setting up the fiber takes much longer than other instruments, preparing the dataset is a long process even with the tools provided by pyfocs, and the technique is still subject to uncertainties that are not yet fully known to the community.
| github_jupyter |
# Machine Translation English-German Example Using SageMaker Seq2Seq
1. [Introduction](#Introduction)
2. [Setup](#Setup)
3. [Download dataset and preprocess](#Download-dataset-and-preprocess)
4. [Training the Machine Translation model](#Training-the-Machine-Translation-model)
5. [Inference](#Inference)
## Introduction
Welcome to our Machine Translation end-to-end example! In this demo, we will train an English-German translation model and test the predictions on a few examples.
SageMaker Seq2Seq algorithm is built on top of [Sockeye](https://github.com/awslabs/sockeye), a sequence-to-sequence framework for Neural Machine Translation based on MXNet. SageMaker Seq2Seq implements state-of-the-art encoder-decoder architectures which can also be used for tasks like Abstractive Summarization in addition to Machine Translation.
To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on.
## Setup
Let's start by specifying:
- The S3 bucket and prefix that you want to use for training and model data. **This should be within the same region as the Notebook Instance, training, and hosting.**
- The IAM role arn used to give training and hosting access to your data. See the documentation for how to create these. Note, if more than one role is required for notebook instances, training, and/or hosting, please replace the boto regexp in the cell below with the appropriate full IAM role arn string(s).
```
# S3 bucket and prefix
bucket = '<your_s3_bucket_name_here>'
prefix = 'sagemaker/<your_s3_prefix_here>' # E.g.'sagemaker/seq2seq/eng-german'
import boto3
import re
from sagemaker import get_execution_role
role = get_execution_role()
```
Next, we'll import the Python libraries we'll need for the remainder of the exercise.
```
from time import gmtime, strftime
import time
import numpy as np
import os
import json
# For plotting attention matrix later on
import matplotlib
%matplotlib inline
import matplotlib.pyplot as plt
```
## Download dataset and preprocess
In this notebook, we will train an English to German translation model on a dataset from the
[Conference on Machine Translation (WMT) 2017](http://www.statmt.org/wmt17/).
```
%%bash
wget http://data.statmt.org/wmt17/translation-task/preprocessed/de-en/corpus.tc.de.gz & \
wget http://data.statmt.org/wmt17/translation-task/preprocessed/de-en/corpus.tc.en.gz & wait
gunzip corpus.tc.de.gz & \
gunzip corpus.tc.en.gz & wait
mkdir validation
curl http://data.statmt.org/wmt17/translation-task/preprocessed/de-en/dev.tgz | tar xvzf - -C validation
```
Please note that it is a common practice to split words into subwords using Byte Pair Encoding (BPE). Please refer to [this](https://github.com/awslabs/sockeye/tree/master/tutorials/wmt) tutorial if you are interested in performing BPE.
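For intuition only, here is a toy sketch of the BPE idea: repeatedly merge the most frequent adjacent symbol pair in the vocabulary. This is not the Sockeye/subword-nmt tooling used in the linked tutorial, and the helper name `learn_bpe_merges` is purely illustrative.
```
from collections import Counter

def learn_bpe_merges(words, num_merges=10):
    # represent each word as a tuple of characters plus an end-of-word marker
    vocab = Counter(tuple(w) + ('</w>',) for w in words)
    merges = []
    for _ in range(num_merges):
        # count all adjacent symbol pairs, weighted by word frequency
        pairs = Counter()
        for symbols, freq in vocab.items():
            for pair in zip(symbols, symbols[1:]):
                pairs[pair] += freq
        if not pairs:
            break
        best = max(pairs, key=pairs.get)
        merges.append(best)
        # apply the chosen merge to every word in the vocabulary
        new_vocab = Counter()
        for symbols, freq in vocab.items():
            merged, i = [], 0
            while i < len(symbols):
                if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                    merged.append(symbols[i] + symbols[i + 1])
                    i += 2
                else:
                    merged.append(symbols[i])
                    i += 1
            new_vocab[tuple(merged)] += freq
        vocab = new_vocab
    return merges

print(learn_bpe_merges("low lower lowest newer wider".split(), num_merges=5))
```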
Since training on the whole dataset might take several hours/days, for this demo, let us train on the **first 10,000 lines only**. Don't run the next cell if you want to train on the complete dataset.
```
!head -n 10000 corpus.tc.en > corpus.tc.en.small
!head -n 10000 corpus.tc.de > corpus.tc.de.small
```
Now, let's use the preprocessing script `create_vocab_proto.py` (provided with this notebook) to create vocabulary mappings (strings to integers) and convert these files to x-recordio-protobuf as required for training by SageMaker Seq2Seq.
Uncomment the cell below and run it to check the arguments this script expects.
```
%%bash
# python3 create_vocab_proto.py -h
```
The cell below does the preprocessing. If you are using the complete dataset, the script might take around 10-15 min on an m4.xlarge notebook instance. Remove ".small" from the file names for training on full datasets.
```
%%time
%%bash
python3 create_vocab_proto.py \
--train-source corpus.tc.en.small \
--train-target corpus.tc.de.small \
--val-source validation/newstest2014.tc.en \
--val-target validation/newstest2014.tc.de
```
The script will output 4 files, namely:
- train.rec : Contains source and target sentences for training in protobuf format
- val.rec : Contains source and target sentences for validation in protobuf format
- vocab.src.json : Vocabulary mapping (string to int) for source language (English in this example)
- vocab.trg.json : Vocabulary mapping (string to int) for target language (German in this example)
Let's upload the pre-processed dataset and vocabularies to S3
```
def upload_to_s3(bucket, prefix, channel, file):
s3 = boto3.resource('s3')
data = open(file, "rb")
key = prefix + "/" + channel + '/' + file
s3.Bucket(bucket).put_object(Key=key, Body=data)
upload_to_s3(bucket, prefix, 'train', 'train.rec')
upload_to_s3(bucket, prefix, 'validation', 'val.rec')
upload_to_s3(bucket, prefix, 'vocab', 'vocab.src.json')
upload_to_s3(bucket, prefix, 'vocab', 'vocab.trg.json')
region_name = boto3.Session().region_name
containers = {'us-west-2': '433757028032.dkr.ecr.us-west-2.amazonaws.com/seq2seq:latest',
'us-east-1': '811284229777.dkr.ecr.us-east-1.amazonaws.com/seq2seq:latest',
'us-east-2': '825641698319.dkr.ecr.us-east-2.amazonaws.com/seq2seq:latest',
'eu-west-1': '685385470294.dkr.ecr.eu-west-1.amazonaws.com/seq2seq:latest'}
container = containers[region_name]
print('Using SageMaker Seq2Seq container: {} ({})'.format(container, region_name))
```
## Training the Machine Translation model
```
job_name = 'seq2seq-en-de-p2-xlarge-' + strftime("%Y-%m-%d-%H", gmtime())
print("Training job", job_name)
create_training_params = \
{
"AlgorithmSpecification": {
"TrainingImage": container,
"TrainingInputMode": "File"
},
"RoleArn": role,
"OutputDataConfig": {
"S3OutputPath": "s3://{}/{}/".format(bucket, prefix)
},
"ResourceConfig": {
# Seq2Seq does not support multiple machines. Currently, it only supports single machine, multiple GPUs
"InstanceCount": 1,
"InstanceType": "ml.p2.xlarge", # We suggest one of ["ml.p2.16xlarge", "ml.p2.8xlarge", "ml.p2.xlarge"]
"VolumeSizeInGB": 50
},
"TrainingJobName": job_name,
"HyperParameters": {
# Please refer to the documentation for complete list of parameters
"max_seq_len_source": "60",
"max_seq_len_target": "60",
"optimized_metric": "bleu",
"batch_size": "64", # Please use a larger batch size (256 or 512) if using ml.p2.8xlarge or ml.p2.16xlarge
"checkpoint_frequency_num_batches": "1000",
"rnn_num_hidden": "512",
"num_layers_encoder": "1",
"num_layers_decoder": "1",
"num_embed_source": "512",
"num_embed_target": "512",
"checkpoint_threshold": "3",
"max_num_batches": "2100"
# Training will stop after 2100 iterations/batches.
# This is just for demo purposes. Remove the above parameter if you want a better model.
},
"StoppingCondition": {
"MaxRuntimeInSeconds": 48 * 3600
},
"InputDataConfig": [
{
"ChannelName": "train",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": "s3://{}/{}/train/".format(bucket, prefix),
"S3DataDistributionType": "FullyReplicated"
}
},
},
{
"ChannelName": "vocab",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": "s3://{}/{}/vocab/".format(bucket, prefix),
"S3DataDistributionType": "FullyReplicated"
}
},
},
{
"ChannelName": "validation",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": "s3://{}/{}/validation/".format(bucket, prefix),
"S3DataDistributionType": "FullyReplicated"
}
},
}
]
}
sagemaker_client = boto3.Session().client(service_name='sagemaker')
sagemaker_client.create_training_job(**create_training_params)
status = sagemaker_client.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
print(status)
status = sagemaker_client.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
print(status)
# if the job failed, determine why
if status == 'Failed':
    message = sagemaker_client.describe_training_job(TrainingJobName=job_name)['FailureReason']
print('Training failed with the following error: {}'.format(message))
raise Exception('Training job failed')
```
> Now wait for the training job to complete and proceed to the next step after you see model artifacts in your S3 bucket.
You can jump to [Use a pretrained model](#Use-a-pretrained-model) as training might take some time.
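If you would rather block the notebook until training finishes, the cell below is a minimal sketch using a boto3 waiter (assuming the `sagemaker_client` and `job_name` defined above):
```
# Block until the training job reaches Completed, Failed, or Stopped
sagemaker_client.get_waiter('training_job_completed_or_stopped').wait(TrainingJobName=job_name)
print(sagemaker_client.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus'])
```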
## Inference
A trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means translating sentence(s) from English to German.
This section involves several steps,
- Create model - Create a model using the artifact (model.tar.gz) produced by training
- Create Endpoint Configuration - Create a configuration defining an endpoint, using the above model
- Create Endpoint - Use the configuration to create an inference endpoint.
- Perform Inference - Perform inference on some input data using the endpoint.
### Create model
We now create a SageMaker Model from the training output. Using the model, we can then create an Endpoint Configuration.
```
use_pretrained_model = False
```
### Use a pretrained model
#### Please uncomment and run the cell below if you want to use a pretrained model, as training might take several hours/days to complete.
```
# use_pretrained_model = True
# model_name = "pretrained-en-de-model"
# !curl https://s3-us-west-2.amazonaws.com/gsaur-seq2seq-data/seq2seq/eng-german/full-nb-translation-eng-german-p2-16x-2017-11-24-22-25-53/output/model.tar.gz > model.tar.gz
# !curl https://s3-us-west-2.amazonaws.com/gsaur-seq2seq-data/seq2seq/eng-german/full-nb-translation-eng-german-p2-16x-2017-11-24-22-25-53/output/vocab.src.json > vocab.src.json
# !curl https://s3-us-west-2.amazonaws.com/gsaur-seq2seq-data/seq2seq/eng-german/full-nb-translation-eng-german-p2-16x-2017-11-24-22-25-53/output/vocab.trg.json > vocab.trg.json
# upload_to_s3(bucket, prefix, 'pretrained_model', 'model.tar.gz')
# model_data = "s3://{}/{}/pretrained_model/model.tar.gz".format(bucket, prefix)
%%time
sage = boto3.client('sagemaker')
if not use_pretrained_model:
info = sage.describe_training_job(TrainingJobName=job_name)
model_name=job_name
model_data = info['ModelArtifacts']['S3ModelArtifacts']
print(model_name)
print(model_data)
primary_container = {
'Image': container,
'ModelDataUrl': model_data
}
create_model_response = sage.create_model(
ModelName = model_name,
ExecutionRoleArn = role,
PrimaryContainer = primary_container)
print(create_model_response['ModelArn'])
```
### Create endpoint configuration
Use the model to create an endpoint configuration. The endpoint configuration also contains information about the type and number of EC2 instances to use when hosting the model.
Since SageMaker Seq2Seq is based on Neural Nets, we could use an ml.p2.xlarge (GPU) instance, but for this example we will use a free tier eligible ml.m4.xlarge.
```
from time import gmtime, strftime
endpoint_config_name = 'Seq2SeqEndpointConfig-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print(endpoint_config_name)
create_endpoint_config_response = sage.create_endpoint_config(
EndpointConfigName = endpoint_config_name,
ProductionVariants=[{
'InstanceType':'ml.m4.xlarge',
'InitialInstanceCount':1,
'ModelName':model_name,
'VariantName':'AllTraffic'}])
print("Endpoint Config Arn: " + create_endpoint_config_response['EndpointConfigArn'])
```
### Create endpoint
Lastly, we create the endpoint that serves up the model, by specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 10-15 minutes to complete.
```
%%time
import time
endpoint_name = 'Seq2SeqEndpoint-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print(endpoint_name)
create_endpoint_response = sage.create_endpoint(
EndpointName=endpoint_name,
EndpointConfigName=endpoint_config_name)
print(create_endpoint_response['EndpointArn'])
resp = sage.describe_endpoint(EndpointName=endpoint_name)
status = resp['EndpointStatus']
print("Status: " + status)
# wait until the status has changed
sage.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name)
# print the status of the endpoint
endpoint_response = sage.describe_endpoint(EndpointName=endpoint_name)
status = endpoint_response['EndpointStatus']
print('Endpoint creation ended with EndpointStatus = {}'.format(status))
if status != 'InService':
raise Exception('Endpoint creation failed.')
```
If you see the message,
> Endpoint creation ended with EndpointStatus = InService
then congratulations! You now have a functioning inference endpoint. You can confirm the endpoint configuration and status by navigating to the "Endpoints" tab in the AWS SageMaker console.
We will finally create a runtime object from which we can invoke the endpoint.
```
runtime = boto3.client(service_name='runtime.sagemaker')
```
# Perform Inference
### Using JSON format for inference (Suggested for a single or small number of data instances)
#### Note that you don't have to convert string to text using the vocabulary mapping for inference using JSON mode
```
sentences = ["you are so good !",
"can you drive a car ?",
"i want to watch a movie ."
]
payload = {"instances" : []}
for sent in sentences:
payload["instances"].append({"data" : sent})
response = runtime.invoke_endpoint(EndpointName=endpoint_name,
ContentType='application/json',
Body=json.dumps(payload))
response = response["Body"].read().decode("utf-8")
response = json.loads(response)
print(response)
```
### Retrieving the Attention Matrix
Passing `"attention_matrix":"true"` in `configuration` of the data instance will return the attention matrix.
```
sentence = 'can you drive a car ?'
payload = {"instances" : [{
"data" : sentence,
"configuration" : {"attention_matrix":"true"}
}
]}
response = runtime.invoke_endpoint(EndpointName=endpoint_name,
ContentType='application/json',
Body=json.dumps(payload))
response = response["Body"].read().decode("utf-8")
response = json.loads(response)['predictions'][0]
source = sentence
target = response["target"]
attention_matrix = np.array(response["matrix"])
print("Source: %s \nTarget: %s" % (source, target))
# Define a function for plotting the attention matrix
def plot_matrix(attention_matrix, target, source):
source_tokens = source.split()
target_tokens = target.split()
assert attention_matrix.shape[0] == len(target_tokens)
plt.imshow(attention_matrix.transpose(), interpolation="nearest", cmap="Greys")
plt.xlabel("target")
plt.ylabel("source")
plt.gca().set_xticks([i for i in range(0, len(target_tokens))])
plt.gca().set_yticks([i for i in range(0, len(source_tokens))])
plt.gca().set_xticklabels(target_tokens)
plt.gca().set_yticklabels(source_tokens)
plt.tight_layout()
plot_matrix(attention_matrix, target, source)
```
### Using Protobuf format for inference (Suggested for efficient bulk inference)
We read the vocabulary mappings, since this mode of inference accepts lists of integers and returns lists of integers.
```
import io
import tempfile
from record_pb2 import Record
from create_vocab_proto import vocab_from_json, reverse_vocab, write_recordio, list_to_record_bytes, read_next
source = vocab_from_json("vocab.src.json")
target = vocab_from_json("vocab.trg.json")
source_rev = reverse_vocab(source)
target_rev = reverse_vocab(target)
sentences = ["this is so cool",
"i am having dinner .",
"i am sitting in an aeroplane .",
"come let us go for a long drive ."]
```
Converting the string to integers, followed by protobuf encoding:
```
# Convert strings to integers using source vocab mapping. Out-of-vocabulary strings are mapped to 1 - the mapping for <unk>
sentences = [[source.get(token, 1) for token in sentence.split()] for sentence in sentences]
f = io.BytesIO()
for sentence in sentences:
record = list_to_record_bytes(sentence, [])
write_recordio(f, record)
response = runtime.invoke_endpoint(EndpointName=endpoint_name,
ContentType='application/x-recordio-protobuf',
Body=f.getvalue())
response = response["Body"].read()
```
Now, parse the protobuf response and convert list of integers back to strings
```
def _parse_proto_response(received_bytes):
output_file = tempfile.NamedTemporaryFile()
output_file.write(received_bytes)
output_file.flush()
target_sentences = []
with open(output_file.name, 'rb') as datum:
next_record = True
while next_record:
next_record = read_next(datum)
if next_record:
rec = Record()
rec.ParseFromString(next_record)
target = list(rec.features["target"].int32_tensor.values)
target_sentences.append(target)
else:
break
return target_sentences
targets = _parse_proto_response(response)
resp = [" ".join([target_rev.get(token, "<unk>") for token in sentence]) for
sentence in targets]
print(resp)
```
# Stop / Close the Endpoint (Optional)
Finally, we should delete the endpoint before we close the notebook.
```
sage.delete_endpoint(EndpointName=endpoint_name)
```
| github_jupyter |
# Let's Grow your Own Inner Core!
### Choose a model in the list:
- geodyn_trg.TranslationGrowthRotation()
- geodyn_static.Hemispheres()
### Choose a proxy type:
- age
- position
- phi
- theta
- growth rate
### set the parameters for the model : geodynModel.set_parameters(parameters)
### set the units : geodynModel.define_units()
### Choose a data set:
- data.SeismicFromFile(filename) # Lauren's data set
- data.RandomData(numbers_of_points)
- data.PerfectSamplingEquator(numbers_of_points)
organized on a cartesian grid. numbers_of_points is the number of points along the x or y axis. The total number of points is numbers_of_points**2*pi/4
- has a special plot function to show streamlines: plot_c_vec(self, modelgeodyn)
- data.PerfectSamplingEquatorRadial(Nr, Ntheta)
the same as above, but organized on a polar grid rather than a cartesian grid.
### Extract the info:
- calculate the proxy value for all points of the data set: geodyn.evaluate_proxy(data_set, geodynModel)
- extract the positions as numpy arrays: extract_rtp or extract_xyz
- calculate other variables: positions.angular_distance_to_point(t,p, t_point, p_point)
```
%matplotlib inline
# import statements
import numpy as np
import matplotlib.pyplot as plt #for figures
from mpl_toolkits.basemap import Basemap #to render maps
import math
import json #to write dict with parameters
from GrowYourIC import positions, geodyn, geodyn_trg, geodyn_static, plot_data, data
plt.rcParams['figure.figsize'] = (8.0, 3.0) #size of figures
cm = plt.cm.get_cmap('viridis')
cm2 = plt.cm.get_cmap('winter')
```
## Define the geodynamical model
Un-comment one of the models below.
```
## un-comment one of them
geodynModel = geodyn_trg.TranslationGrowthRotation() #can do all the models presented in the paper
# geodynModel = geodyn_static.Hemispheres() #this is a static model, only hemispheres.
```
Change the values of the parameters to get the model you want (here, parameters for .TranslationGrowthRotation())
```
age_ic_dim = 1e9 #in years
rICB_dim = 1221. #in km
v_g_dim = rICB_dim/age_ic_dim # in km/years #growth rate
print("Growth rate is {:.2e} km/years".format(v_g_dim))
v_g_dim_seconds = v_g_dim*1e3/(np.pi*1e7)
translation_velocity_dim = 0.8*v_g_dim_seconds#4e-10 #0.8*v_g_dim_seconds#4e-10 #m.s, value for today's Earth with Q_cmb = 10TW (see Alboussiere et al. 2010)
time_translation = rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7)
maxAge = 2.*time_translation/1e6
print("The translation recycles the inner core material in {0:.2e} million years".format(maxAge))
print("Translation velocity is {0:.2e} km/years".format(translation_velocity_dim*np.pi*1e7/1e3))
units = None #we give them already dimensionless parameters.
rICB = 1.
age_ic = 1.
omega = 0.#0.5*np.pi/200e6*age_ic_dim#0.5*np.pi #0. #0.5*np.pi/200e6*age_ic_dim# 0.#0.5*np.pi#0.#0.5*np.pi/200e6*age_ic_dim #0. #-0.5*np.pi # Rotation rates has to be in ]-np.pi, np.pi[
print("Rotation rate is {:.2e}".format(omega))
velocity_amplitude = translation_velocity_dim*age_ic_dim*np.pi*1e7/rICB_dim/1e3
velocity_center = [0., 100.]#center of the eastern hemisphere
velocity = geodyn_trg.translation_velocity(velocity_center, velocity_amplitude)
exponent_growth = 1.#0.1#1
print(v_g_dim, velocity_amplitude, omega/age_ic_dim*180/np.pi*1e6)
```
Define a proxy type, and a proxy name (to be used in the figures to annotate the axes)
You can re-define it later if you want (or define another proxy_type2 if needed)
```
proxy_type = "age"#"growth rate"
proxy_name = "age (Myears)" #growth rate (km/Myears)"
proxy_lim = [0, maxAge] #or None
#proxy_lim = None
fig_name = "figures/test_" #to name the figures
print(rICB, age_ic, velocity_amplitude, omega, exponent_growth, proxy_type)
print(velocity)
```
### Parameters for the geodynamical model
This will input the different parameters in the model.
```
parameters = dict({'units': units,
'rICB': rICB,
'tau_ic':age_ic,
'vt': velocity,
'exponent_growth': exponent_growth,
'omega': omega,
'proxy_type': proxy_type})
geodynModel.set_parameters(parameters)
geodynModel.define_units()
param = parameters
param['vt'] = parameters['vt'].tolist() #for json serialization
# write file with parameters, readable with json, but also human-readable
with open(fig_name+'parameters.json', 'w') as f:
json.dump(param, f)
print(parameters)
```
## Different data set and visualisations
### Perfect sampling at the equator (to visualise the flow lines)
You can add more points to get a better precision.
```
npoints = 10 #number of points in the x direction for the data set.
data_set = data.PerfectSamplingEquator(npoints, rICB = 1.)
data_set.method = "bt_point"
proxy = geodyn.evaluate_proxy(data_set, geodynModel, proxy_type="age", verbose = False)
data_set.plot_c_vec(geodynModel, proxy=proxy, cm=cm, nameproxy="age (Myears)")
plt.savefig(fig_name+"equatorial_plot.pdf", bbox_inches='tight')
```
### Perfect sampling in the first 100km (to visualise the depth evolution)
```
data_meshgrid = data.Equator_upperpart(10,10)
data_meshgrid.method = "bt_point"
proxy_meshgrid = geodyn.evaluate_proxy(data_meshgrid, geodynModel, proxy_type=proxy_type, verbose = False)
#r, t, p = data_meshgrid.extract_rtp("bottom_turning_point")
fig3, ax3 = plt.subplots(figsize=(8, 2))
X, Y, Z = data_meshgrid.mesh_RPProxy(proxy_meshgrid)
sc = ax3.contourf(Y, rICB_dim*(1.-X), Z, 100, cmap=cm)
sc2 = ax3.contour(sc, levels=sc.levels[::15], colors = "k")
ax3.set_ylim(-0, 120)
fig3.gca().invert_yaxis()
ax3.set_xlim(-180,180)
cbar = fig3.colorbar(sc)
#cbar.set_clim(0, maxAge)
cbar.set_label(proxy_name)
ax3.set_xlabel("longitude")
ax3.set_ylabel("depth below ICB (km)")
plt.savefig(fig_name+"meshgrid.pdf", bbox_inches='tight')
npoints = 20 #number of points in the x direction for the data set.
data_set = data.PerfectSamplingSurface(npoints, rICB = 1., depth=0.01)
data_set.method = "bt_point"
proxy_surface = geodyn.evaluate_proxy(data_set, geodynModel, proxy_type=proxy_type, verbose = False)
#r, t, p = data_set.extract_rtp("bottom_turning_point")
X, Y, Z = data_set.mesh_TPProxy(proxy_surface)
## map
m, fig = plot_data.setting_map()
y, x = m(Y, X)
sc = m.contourf(y, x, Z, 30, cmap=cm, zorder=2, edgecolors='none')
plt.title("Dataset: {},\n geodynamic model: {}".format(data_set.name, geodynModel.name))
cbar = plt.colorbar(sc)
cbar.set_label(proxy_name)
fig.savefig(fig_name+"map_surface.pdf", bbox_inches='tight')
```
### Random data set, in the first 100km - bottom turning point only
#### Calculate the data
```
# random data set
data_set_random = data.RandomData(300)
data_set_random.method = "bt_point"
proxy_random = geodyn.evaluate_proxy(data_set_random, geodynModel, proxy_type=proxy_type, verbose=False)
data_path = "../GrowYourIC/data/"
geodynModel.data_path = data_path
if proxy_type == "age":
# ## domain size and Vp
proxy_random_size = geodyn.evaluate_proxy(data_set_random, geodynModel, proxy_type="domain_size", verbose=False)
proxy_random_dV = geodyn.evaluate_proxy(data_set_random, geodynModel, proxy_type="dV_V", verbose=False)
r, t, p = data_set_random.extract_rtp("bottom_turning_point")
dist = positions.angular_distance_to_point(t, p, *velocity_center)
## map
m, fig = plot_data.setting_map()
x, y = m(p, t)
sc = m.scatter(x, y, c=proxy_random,s=8, zorder=10, cmap=cm, edgecolors='none')
plt.title("Dataset: {},\n geodynamic model: {}".format(data_set_random.name, geodynModel.name))
cbar = plt.colorbar(sc)
cbar.set_label(proxy_name)
fig.savefig(fig_name+data_set_random.shortname+"_map.pdf", bbox_inches='tight')
## phi and distance plots
fig, ax = plt.subplots(2,2, figsize=(8.0, 5.0))
sc1 = ax[0,0].scatter(p, proxy_random, c=abs(t),s=3, cmap=cm2, vmin =-0, vmax =90, linewidth=0)
phi = np.linspace(-180,180, 50)
#analytic_equator = np.maximum(2*np.sin((phi-10)*np.pi/180.)*rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7)/1e6,0.)
#ax[0,0].plot(phi,analytic_equator, 'r', linewidth=2)
ax[0,0].set_xlabel("longitude")
ax[0,0].set_ylabel(proxy_name)
if proxy_lim is not None:
ax[0,0].set_ylim(proxy_lim)
sc2 = ax[0,1].scatter(dist, proxy_random, c=abs(t), cmap=cm2, vmin=-0, vmax =90, s=3, linewidth=0)
ax[0,1].set_xlabel("angular distance to ({}, {})".format(*velocity_center))
phi = np.linspace(-90,90, 100)
if proxy_type == "age":
analytic_equator = np.maximum(2*np.sin((phi-10)*np.pi/180.)*rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7)/1e6,0.)
ax[0,0].plot(phi,analytic_equator, 'r', linewidth=2)
analytic_equator = np.maximum(2*np.sin((-phi)*np.pi/180.)*rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7)/1e6,0.)
ax[0,1].plot(phi+90,analytic_equator, 'r', linewidth=2)
ax[0,1].set_xlim([0,180])
ax[0,0].set_xlim([-180,180])
cbar = fig.colorbar(sc1)
cbar.set_label("longitude: abs(theta)")
if proxy_lim is not None:
ax[0,1].set_ylim(proxy_lim)
## figure with domain size and Vp
if proxy_type == "age":
sc3 = ax[1,0].scatter(dist, proxy_random_size, c=abs(t), cmap=cm2, vmin =-0, vmax =90, s=3, linewidth=0)
ax[1,0].set_xlabel("angular distance to ({}, {})".format(*velocity_center))
ax[1,0].set_ylabel("domain size (m)")
ax[1,0].set_xlim([0,180])
ax[1,0].set_ylim([0, 2500.000])
sc4 = ax[1,1].scatter(dist, proxy_random_dV, c=abs(t), cmap=cm2, vmin=-0, vmax =90, s=3, linewidth=0)
ax[1,1].set_xlabel("angular distance to ({}, {})".format(*velocity_center))
ax[1,1].set_ylabel("dV/V")
ax[1,1].set_xlim([0,180])
ax[1,1].set_ylim([-0.017, -0.002])
fig.savefig(fig_name +data_set_random.shortname+ '_long_dist.pdf', bbox_inches='tight')
fig, ax = plt.subplots(figsize=(8, 2))
sc=ax.scatter(p,rICB_dim*(1.-r), c=proxy_random, s=10,cmap=cm, linewidth=0)
ax.set_ylim(-0,120)
fig.gca().invert_yaxis()
ax.set_xlim(-180,180)
cbar = fig.colorbar(sc)
if proxy_lim is not None:
cbar.set_clim(0, maxAge)
ax.set_xlabel("longitude")
ax.set_ylabel("depth below ICB (km)")
cbar.set_label(proxy_name)
fig.savefig(fig_name+data_set_random.shortname+"_depth.pdf", bbox_inches='tight')
```
### Real data set from the Waszek paper
```
## real data set
data_set = data.SeismicFromFile("../GrowYourIC/data/WD11.dat")
data_set.method = "bt_point"
proxy2 = geodyn.evaluate_proxy(data_set, geodynModel, proxy_type=proxy_type, verbose=False)
if proxy_type == "age":
## domain size and DV/V
proxy_size = geodyn.evaluate_proxy(data_set, geodynModel, proxy_type="domain_size", verbose=False)
proxy_dV = geodyn.evaluate_proxy(data_set, geodynModel, proxy_type="dV_V", verbose=False)
r, t, p = data_set.extract_rtp("bottom_turning_point")
dist = positions.angular_distance_to_point(t, p, *velocity_center)
## map
m, fig = plot_data.setting_map()
x, y = m(p, t)
sc = m.scatter(x, y, c=proxy2,s=8, zorder=10, cmap=cm, edgecolors='none')
plt.title("Dataset: {},\n geodynamic model: {}".format(data_set.name, geodynModel.name))
cbar = plt.colorbar(sc)
cbar.set_label(proxy_name)
fig.savefig(fig_name+data_set.shortname+"_map.pdf", bbox_inches='tight')
## phi and distance plots
fig, ax = plt.subplots(2,2, figsize=(8.0, 5.0))
sc1 = ax[0,0].scatter(p, proxy2, c=abs(t),s=3, cmap=cm2, vmin =-0, vmax =90, linewidth=0)
phi = np.linspace(-180,180, 50)
#analytic_equator = np.maximum(2*np.sin((phi-10)*np.pi/180.)*rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7)/1e6,0.)
#ax[0,0].plot(phi,analytic_equator, 'r', linewidth=2)
ax[0,0].set_xlabel("longitude")
ax[0,0].set_ylabel(proxy_name)
if proxy_lim is not None:
ax[0,0].set_ylim(proxy_lim)
sc2 = ax[0,1].scatter(dist, proxy2, c=abs(t), cmap=cm2, vmin=-0, vmax =90, s=3, linewidth=0)
ax[0,1].set_xlabel("angular distance to ({}, {})".format(*velocity_center))
phi = np.linspace(-90,90, 100)
if proxy_type == "age":
analytic_equator = np.maximum(2*np.sin((-phi)*np.pi/180.)*rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7)/1e6,0.)
ax[0,1].plot(phi+90,analytic_equator, 'r', linewidth=2)
analytic_equator = np.maximum(2*np.sin((phi-10)*np.pi/180.)*rICB_dim*1e3/translation_velocity_dim /(np.pi*1e7)/1e6,0.)
ax[0,0].plot(phi,analytic_equator, 'r', linewidth=2)
ax[0,1].set_xlim([0,180])
ax[0,0].set_xlim([-180,180])
cbar = fig.colorbar(sc1)
cbar.set_label("longitude: abs(theta)")
if proxy_lim is not None:
ax[0,1].set_ylim(proxy_lim)
## figure with domain size and Vp
if proxy_type == "age":
sc3 = ax[1,0].scatter(dist, proxy_size, c=abs(t), cmap=cm2, vmin =-0, vmax =90, s=3, linewidth=0)
ax[1,0].set_xlabel("angular distance to ({}, {})".format(*velocity_center))
ax[1,0].set_ylabel("domain size (m)")
ax[1,0].set_xlim([0,180])
ax[1,0].set_ylim([0, 2500.000])
sc4 = ax[1,1].scatter(dist, proxy_dV, c=abs(t), cmap=cm2, vmin=-0, vmax =90, s=3, linewidth=0)
ax[1,1].set_xlabel("angular distance to ({}, {})".format(*velocity_center))
ax[1,1].set_ylabel("dV/V")
ax[1,1].set_xlim([0,180])
ax[1,1].set_ylim([-0.017, -0.002])
fig.savefig(fig_name + data_set.shortname+'_long_dist.pdf', bbox_inches='tight')
fig, ax = plt.subplots(figsize=(8, 2))
sc=ax.scatter(p,rICB_dim*(1.-r), c=proxy2, s=10,cmap=cm, linewidth=0)
ax.set_ylim(-0,120)
fig.gca().invert_yaxis()
ax.set_xlim(-180,180)
cbar = fig.colorbar(sc)
if proxy_lim is not None:
cbar.set_clim(0, maxAge)
ax.set_xlabel("longitude")
ax.set_ylabel("depth below ICB (km)")
cbar.set_label(proxy_name)
fig.savefig(fig_name+data_set.shortname+"_depth.pdf", bbox_inches='tight')
```
| github_jupyter |
<a href="https://colab.research.google.com/github/danzerzine/seospider-colab/blob/main/Running_screamingfrog_SEO_spider_in_Colab_notebook.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Running the Screaming Frog SEO spider bot in the cloud via Google Colab
-------------
> *Protip: for a large site, the best fit is a High-RAM (25 GB) instance without GPU/TPU, available with the PRO subscription*
###Cosmetic improvement: add line wrapping for long single-line commands
```
from IPython.display import HTML, display
def set_css():
display(HTML('''
<style>
pre {
white-space: pre-wrap;
}
</style>
'''))
get_ipython().events.register('pre_run_cell', set_css)
```
###Mount the Google Drive that stores the bot's configs and where the crawl results will be saved
```
from google.colab import drive
drive.mount('/content/drive')
```
###Find out the instance's external IP
so you can manually add it to the Cloudflare firewall exceptions -- otherwise we will hit the rate limit very quickly and start getting the human-verification page
```
!wget -qO- http://ipecho.net/plain | xargs echo && wget -qO - icanhazip.com
```
###Install the latest version of SEO spider and do some minor housekeeping
* Update the installed Linux packages
* Copy the settings from the desktop version of SEO spider into the instance's local folder (this is needed to pass along the authorization tokens for Google Search Console, GA, and so on)
```
#@title Settings directory on GDrive { vertical-output: true, display-mode: "both" }
settings_path = "" #@param {type:"string"}
!wget https://download.screamingfrog.co.uk/products/seo-spider/screamingfrogseospider_16.3_all.deb
!apt-get install -y ./screamingfrogseospider_16.3_all.deb
!sudo apt-get update && sudo apt-get upgrade -y
!mkdir -p ~/.ScreamingFrogSEOSpider
!cp -r $settings_path/* ~/.ScreamingFrogSEOSpider
```
### Run the bash script that finishes configuring the instance and the bot
It adds a virtual display for the Java output, switches the bot to storing crawl results on disk instead of RAM, and so on.
```
!wget https://raw.githubusercontent.com/fili/screaming-frog-on-google-compute-engine/master/gce-sf.sh -O install.sh && chmod +x install.sh && source ./install.sh
```
###Symlink the hidden folder with the bot's temporary files and settings
in case something needs to be edited or pulled out of it on the fly; otherwise it won't be visible in the file browser on the left
```
!ln -s ~/.ScreamingFrogSEOSpider ~/ScreamingFrogSEOSpider
```
###Launch the bot in headless mode
passing all the required flags for exports, settings, reports, bulk exports, and so on
```
#@title Crawl settings { vertical-output: true }
url_start = "" #@param {type:"string"}
use_gcs = "" #@param ["", "--use-google-search-console \"account \""] {allow-input: true}
config_path = "" #@param {type:"string"}
output_folder = "" #@param {type:"string"}
!screamingfrogseospider --crawl "$url_start" $use_gcs --headless --config "$config_path" --output-folder "$output_folder" --timestamped-output --save-crawl --export-tabs "Internal:All,Response Codes:All,Response Codes:Blocked by Robots.txt,Response Codes:Blocked Resource,Response Codes:No Response,Response Codes:Redirection (3xx),Response Codes:Redirection (JavaScript),Response Codes:Redirection (Meta Refresh),Response Codes:Client Error (4xx),Response Codes:Server Error (5xx),Page Titles:All,Page Titles:Missing,Page Titles:Duplicate,Page Titles:Over X Characters,Page Titles:Below X Characters,Page Titles:Over X Pixels,Page Titles:Below X Pixels,Page Titles:Same as H1,Page Titles:Multiple,Meta Description:All,Meta Description:Missing,Meta Description:Duplicate,Meta Description:Over X Characters,Meta Description:Below X Characters,Meta Description:Over X Pixels,Meta Description:Below X Pixels,Meta Description:Multiple,Meta Keywords:All,Meta Keywords:Missing,Meta Keywords:Duplicate,Meta Keywords:Multiple,Canonicals:All,Canonicals:Contains Canonical,Canonicals:Self Referencing,Canonicals:Canonicalised,Canonicals:Missing,Canonicals:Multiple,Canonicals:Non-Indexable Canonical,Directives:All,Directives:Index,Directives:Noindex,Directives:Follow,Directives:Nofollow,Directives:None,Directives:NoArchive,Directives:NoSnippet,Directives:Max-Snippet,Directives:Max-Image-Preview,Directives:Max-Video-Preview,Directives:NoODP,Directives:NoYDIR,Directives:NoImageIndex,Directives:NoTranslate,Directives:Unavailable_After,Directives:Refresh,AMP:All,AMP:Non-200 Response,AMP:Missing Non-AMP Return Link,AMP:Missing Canonical to Non-AMP,AMP:Non-Indexable Canonical,AMP:Indexable,AMP:Non-Indexable,AMP:Missing <html amp> Tag,AMP:Missing/Invalid <!doctype html> Tag,AMP:Missing <head> Tag,AMP:Missing <body> Tag,AMP:Missing Canonical,AMP:Missing/Invalid <meta charset> Tag,AMP:Missing/Invalid <meta viewport> Tag,AMP:Missing/Invalid AMP Script,AMP:Missing/Invalid AMP Boilerplate,AMP:Contains Disallowed HTML,AMP:Other Validation Errors,Structured Data:All,Structured Data:Contains Structured Data,Structured Data:Missing,Structured Data:Validation Errors,Structured Data:Validation Warnings,Structured Data:Parse Errors,Structured Data:Microdata URLs,Structured Data:JSON-LD URLs,Structured Data:RDFa URLs,Sitemaps:All,Sitemaps:URLs in Sitemap,Sitemaps:URLs not in Sitemap,Sitemaps:Orphan URLs,Sitemaps:Non-Indexable URLs in Sitemap,Sitemaps:URLs in Multiple Sitemaps,Sitemaps:XML Sitemap with over 50k URLs,Sitemaps:XML Sitemap over 50MB" --bulk-export "Canonicals:Contains Canonical Inlinks,Canonicals:Self Referencing Inlinks,Canonicals:Canonicalised Inlinks,Canonicals:Missing Inlinks,Canonicals:Multiple Inlinks,Canonicals:Non-Indexable Canonical Inlinks,AMP:All Inlinks,AMP:Non-200 Response Inlinks,AMP:Missing Non-AMP Return Link Inlinks,AMP:Missing Canonical to Non-AMP Inlinks,AMP:Non-Indexable Canonical Inlinks,AMP:Indexable Inlinks,AMP:Non-Indexable Inlinks,Structured Data:Contains Structured Data,Structured Data:Validation Errors,Structured Data:Validation Warnings,Structured Data:JSON-LD URLs,Structured Data:Microdata URLs,Structured Data:RDFa URLs,Sitemaps:URLs in Sitemap Inlinks,Sitemaps:Orphan URLs Inlinks,Sitemaps:Non-Indexable URLs in Sitemap Inlinks,Sitemaps:URLs in Multiple Sitemaps Inlinks" --save-report "Crawl Overview,Redirects:All Redirects,Redirects:Redirect Chains,Redirects:Redirect & Canonical Chains,Canonicals:Canonical Chains,Canonicals:Non-Indexable Canonicals,Pagination:Non-200 Pagination 
URLs,Pagination:Unlinked Pagination URLs,Hreflang:All hreflang URLs,Hreflang:Non-200 hreflang URLs,Hreflang:Unlinked hreflang URLs,Hreflang:Missing Return Links,Hreflang:Inconsistent Language & Region Return Links,Hreflang:Non Canonical Return Links,Hreflang:Noindex Return Links,Insecure Content,SERP Summary,Orphan Pages,Structured Data:Validation Errors & Warnings Summary,Structured Data:Validation Errors & Warnings,Structured Data:Google Rich Results Features Summary,Structured Data:Google Rich Results Features,HTTP Headers:HTTP Header Summary,Cookies:Cookie Summary" --export-format xlsx --export-custom-summary "Site Crawled,Date,Time,Total URLs Encountered,Total URLs Crawled,Total Internal blocked by robots.txt,Total External blocked by robots.txt,URLs Displayed,Total Internal URLs,Total External URLs,Total Internal Indexable URLs,Total Internal Non-Indexable URLs,JavaScript:All,JavaScript:Uses Old AJAX Crawling Scheme URLs,JavaScript:Uses Old AJAX Crawling Scheme Meta Fragment Tag,JavaScript:Page Title Only in Rendered HTML,JavaScript:Page Title Updated by JavaScript,JavaScript:H1 Only in Rendered HTML,JavaScript:H1 Updated by JavaScript,JavaScript:Meta Description Only in Rendered HTML,JavaScript:Meta Description Updated by JavaScript,JavaScript:Canonical Only in Rendered HTML,JavaScript:Canonical Mismatch,JavaScript:Noindex Only in Original HTML,JavaScript:Nofollow Only in Original HTML,JavaScript:Contains JavaScript Links,JavaScript:Contains JavaScript Content,JavaScript:Pages with Blocked Resources,H1:All,H1:Missing,H1:Duplicate,H1:Over X Characters,H1:Multiple,H2:All,H2:Missing,H2:Duplicate,H2:Over X Characters,H2:Multiple,Internal:All,Internal:HTML,Internal:JavaScript,Internal:CSS,Internal:Images,Internal:PDF,Internal:Flash,Internal:Other,Internal:Unknown,External:All,External:HTML,External:JavaScript,External:CSS,External:Images,External:PDF,External:Flash,External:Other,External:Unknown,AMP:All,AMP:Non-200 Response,AMP:Missing Non-AMP Return Link,AMP:Missing Canonical to Non-AMP,AMP:Non-Indexable Canonical,AMP:Indexable,AMP:Non-Indexable,AMP:Missing <html amp> Tag,AMP:Missing/Invalid <!doctype html> Tag,AMP:Missing <head> Tag,AMP:Missing <body> Tag,AMP:Missing Canonical,AMP:Missing/Invalid <meta charset> Tag,AMP:Missing/Invalid <meta viewport> Tag,AMP:Missing/Invalid AMP Script,AMP:Missing/Invalid AMP Boilerplate,AMP:Contains Disallowed HTML,AMP:Other Validation Errors,Canonicals:All,Canonicals:Contains Canonical,Canonicals:Self Referencing,Canonicals:Canonicalised,Canonicals:Missing,Canonicals:Multiple,Canonicals:Non-Indexable Canonical,Content:All,Content:Spelling Errors,Content:Grammar Errors,Content:Near Duplicates,Content:Exact Duplicates,Content:Low Content Pages,Custom Extraction:All,Custom Search:All,Directives:All,Directives:Index,Directives:Noindex,Directives:Follow,Directives:Nofollow,Directives:None,Directives:NoArchive,Directives:NoSnippet,Directives:Max-Snippet,Directives:Max-Image-Preview,Directives:Max-Video-Preview,Directives:NoODP,Directives:NoYDIR,Directives:NoImageIndex,Directives:NoTranslate,Directives:Unavailable_After,Directives:Refresh,Analytics:All,Analytics:Sessions Above 0,Analytics:Bounce Rate Above 70%,Analytics:No GA Data,Analytics:Non-Indexable with GA Data,Analytics:Orphan URLs,Search Console:All,Search Console:Clicks Above 0,Search Console:No GSC Data,Search Console:Non-Indexable with GSC Data,Search Console:Orphan URLs,Hreflang:All,Hreflang:Contains hreflang,Hreflang:Non-200 hreflang URLs,Hreflang:Unlinked hreflang URLs,Hreflang:Missing Return 
Links,Hreflang:Inconsistent Language & Region Return Links,Hreflang:Non-Canonical Return Links,Hreflang:Noindex Return Links,Hreflang:Incorrect Language & Region Codes,Hreflang:Multiple Entries,Hreflang:Missing Self Reference,Hreflang:Not Using Canonical,Hreflang:Missing X-Default,Hreflang:Missing,Images:All,Images:Over X KB,Images:Missing Alt Text,Images:Missing Alt Attribute,Images:Alt Text Over X Characters,Link Metrics:All,Meta Description:All,Meta Description:Missing,Meta Description:Duplicate,Meta Description:Over X Characters,Meta Description:Below X Characters,Meta Description:Over X Pixels,Meta Description:Below X Pixels,Meta Description:Multiple,Meta Keywords:All,Meta Keywords:Missing,Meta Keywords:Duplicate,Meta Keywords:Multiple,PageSpeed:All,PageSpeed:Eliminate Render-Blocking Resources,PageSpeed:Defer Offscreen Images,PageSpeed:Efficiently Encode Images,PageSpeed:Properly Size Images,PageSpeed:Minify CSS,PageSpeed:Minify JavaScript,PageSpeed:Reduce Unused CSS,PageSpeed:Reduce Unused JavaScript,PageSpeed:Serve Images in Next-Gen Formats,PageSpeed:Enable Text Compression,PageSpeed:Preconnect to Required Origins,PageSpeed:Reduce Server Response Times (TTFB),PageSpeed:Avoid Multiple Page Redirects,PageSpeed:Preload Key Requests,PageSpeed:Use Video Formats for Animated Content,PageSpeed:Avoid Excessive DOM Size,PageSpeed:Reduce JavaScript Execution Time,PageSpeed:Serve Static Assets with an Efficient Cache Policy,PageSpeed:Minimize Main-Thread Work,PageSpeed:Ensure Text Remains Visible During Webfont Load,PageSpeed:Image Elements Do Not Have Explicit Width & Height,PageSpeed:Avoid Large Layout Shifts,PageSpeed:Avoid Serving Legacy JavaScript to Modern Browsers,PageSpeed:Request Errors,Pagination:All,Pagination:Contains Pagination,Pagination:First Page,Pagination:Paginated 2+ Pages,Pagination:Pagination URL Not in Anchor Tag,Pagination:Non-200 Pagination URLs,Pagination:Unlinked Pagination URLs,Pagination:Non-Indexable,Pagination:Multiple Pagination URLs,Pagination:Pagination Loop,Pagination:Sequence Error,Response Codes:All,Response Codes:Blocked by Robots.txt,Response Codes:Blocked Resource,Response Codes:No Response,Response Codes:Success (2xx),Response Codes:Redirection (3xx),Response Codes:Redirection (JavaScript),Response Codes:Redirection (Meta Refresh),Response Codes:Client Error (4xx),Response Codes:Server Error (5xx),Security:All,Security:HTTP URLs,Security:HTTPS URLs,Security:Mixed Content,Security:Form URL Insecure,Security:Form on HTTP URL,Security:Unsafe Cross-Origin Links,Security:Missing HSTS Header,Security:Bad Content Type,Security:Missing X-Content-Type-Options Header,Security:Missing X-Frame-Options Header,Security:Protocol-Relative Resource Links,Security:Missing Content-Security-Policy Header,Security:Missing Secure Referrer-Policy Header,Sitemaps:All,Sitemaps:URLs in Sitemap,Sitemaps:URLs not in Sitemap,Sitemaps:Orphan URLs,Sitemaps:Non-Indexable URLs in Sitemap,Sitemaps:URLs in Multiple Sitemaps,Sitemaps:XML Sitemap with over 50k URLs,Sitemaps:XML Sitemap over 50MB,Structured Data:All,Structured Data:Contains Structured Data,Structured Data:Missing,Structured Data:Validation Errors,Structured Data:Validation Warnings,Structured Data:Parse Errors,Structured Data:Microdata URLs,Structured Data:JSON-LD URLs,Structured Data:RDFa URLs,Page Titles:All,Page Titles:Missing,Page Titles:Duplicate,Page Titles:Over X Characters,Page Titles:Below X Characters,Page Titles:Over X Pixels,Page Titles:Below X Pixels,Page Titles:Same as H1,Page Titles:Multiple,URL:All,URL:Non 
ASCII Characters,URL:Underscores,URL:Uppercase,URL:Parameters,URL:Over X Characters,URL:Multiple Slashes,URL:Repetitive Path,URL:Contains Space,URL:Broken Bookmark,URL:Internal Search,Depth 1,Depth 2,Depth 3,Depth 4,Depth 5,Depth 6,Depth 7,Depth 8,Depth 9,Depth 10+,Top Inlinks 1 URL,Top Inlinks 1 Number of Inlinks,Top Inlinks 2 URL,Top Inlinks 2 Number of Inlinks,Top Inlinks 3 URL,Top Inlinks 3 Number of Inlinks,Top Inlinks 4 URL,Top Inlinks 4 Number of Inlinks,Top Inlinks 5 URL,Top Inlinks 5 Number of Inlinks,Top Inlinks 6 URL,Top Inlinks 6 Number of Inlinks,Top Inlinks 7 URL,Top Inlinks 7 Number of Inlinks,Top Inlinks 8 URL,Top Inlinks 8 Number of Inlinks,Top Inlinks 9 URL,Top Inlinks 9 Number of Inlinks,Top Inlinks 10 URL,Top Inlinks 10 Number of Inlinks,Top Inlinks 11 URL,Top Inlinks 11 Number of Inlinks,Top Inlinks 12 URL,Top Inlinks 12 Number of Inlinks,Top Inlinks 13 URL,Top Inlinks 13 Number of Inlinks,Top Inlinks 14 URL,Top Inlinks 14 Number of Inlinks,Top Inlinks 15 URL,Top Inlinks 15 Number of Inlinks,Top Inlinks 16 URL,Top Inlinks 16 Number of Inlinks,Top Inlinks 17 URL,Top Inlinks 17 Number of Inlinks,Top Inlinks 18 URL,Top Inlinks 18 Number of Inlinks,Top Inlinks 19 URL,Top Inlinks 19 Number of Inlinks,Top Inlinks 20 URL,Top Inlinks 20 Number of Inlinks,Response Times 0s to 1s,Response Times 1s to 2s,Response Times 2s to 3s,Response Times 3s to 4s,Response Times 4s to 5s,Response Times 5s to 6s,Response Times 6s to 7s,Response Times 7s to 8s,Response Times 8s to 9s,Response Times 10s or more"
```
# ✦ *Colab Still Alive Console Script:*
<p><font size=2px ><font color="red"> Tip - Set a JavaScript interval to click the connect button every 60 seconds. Open the developer tools in your web browser with Ctrl+Shift+I (on Mac: Option+Command+I), switch to the Console tab, and paste the snippet at the console prompt.</font></p><b>Copy the script from the hidden cell and paste it into your browser console. DO NOT CLOSE YOUR BROWSER, OTHERWISE THE SCRIPT WILL STOP RUNNING</b>
<code>function ClickConnect(){
console.log("Working");
document.querySelector("colab-connect-button").click()
}setInterval(ClickConnect,60000)</code>
# *The end result*
Ideally, the output is a folder named with the crawl date, containing the following Excel exports:
**Tabs**:
```
Internal:All
Response Codes:All
Response Codes:Blocked by Robots.txt
Response Codes:Blocked Resource
Response Codes:No Response
Response Codes:Redirection (3xx)
Response Codes:Redirection (JavaScript)
Response Codes:Redirection (Meta Refresh)
Response Codes:Client Error (4xx)
Response Codes:Server Error (5xx)
Page Titles:All
Page Titles:Missing
Page Titles:Duplicate
Page Titles:Over X Characters
Page Titles:Below X Characters
Page Titles:Over X Pixels
Page Titles:Below X Pixels
Page Titles:Same as H1
Page Titles:Multiple
Meta Description:All
Meta Description:Missing
Meta Description:Duplicate
Meta Description:Over X Characters
Meta Description:Below X Characters
Meta Description:Over X Pixels
Meta Description:Below X Pixels
Meta Description:Multiple
Meta Keywords:All
Meta Keywords:Missing
Meta Keywords:Duplicate
Meta Keywords:Multiple
Canonicals:All
Canonicals:Contains Canonical
Canonicals:Self Referencing
Canonicals:Canonicalised
Canonicals:Missing
Canonicals:Multiple
Canonicals:Non-Indexable Canonical
Directives:All
Directives:Index
Directives:Noindex
Directives:Follow
Directives:Nofollow
Directives:None
Directives:NoArchive
Directives:NoSnippet
Directives:Max-Snippet
Directives:Max-Image-Preview
Directives:Max-Video-Preview
Directives:NoODP
Directives:NoYDIR
Directives:NoImageIndex
Directives:NoTranslate
Directives:Unavailable_After
Directives:Refresh
AMP:All
AMP:Non-200 Response
AMP:Missing Non-AMP Return Link
AMP:Missing Canonical to Non-AMP
AMP:Non-Indexable Canonical
AMP:Indexable
AMP:Non-Indexable
AMP:Missing <html amp> Tag
AMP:Missing/Invalid <!doctype html> Tag
AMP:Missing <head> Tag
AMP:Missing <body> Tag
AMP:Missing Canonical
AMP:Missing/Invalid <meta charset> Tag
AMP:Missing/Invalid <meta viewport> Tag
AMP:Missing/Invalid AMP Script
AMP:Missing/Invalid AMP Boilerplate
AMP:Contains Disallowed HTML
AMP:Other Validation Errors
Structured Data:All
Structured Data:Contains Structured Data
Structured Data:Missing
Structured Data:Validation Errors
Structured Data:Validation Warnings
Structured Data:Parse Errors
Structured Data:Microdata URLs
Structured Data:JSON-LD URLs
Structured Data:RDFa URLs
Sitemaps:All
Sitemaps:URLs in Sitemap
Sitemaps:URLs not in Sitemap
Sitemaps:Orphan URLs
Sitemaps:Non-Indexable URLs in Sitemap
Sitemaps:URLs in Multiple Sitemaps
Sitemaps:XML Sitemap with over 50k URLs
Sitemaps:XML Sitemap over 50MB
```
**Bulk exports**:
```
Canonicals:Contains Canonical Inlinks
Canonicals:Self Referencing Inlinks
Canonicals:Canonicalised Inlinks
Canonicals:Missing Inlinks
Canonicals:Multiple Inlinks
Canonicals:Non-Indexable Canonical Inlinks
AMP:All Inlinks
AMP:Non-200 Response Inlinks
AMP:Missing Non-AMP Return Link Inlinks
AMP:Missing Canonical to Non-AMP Inlinks
AMP:Non-Indexable Canonical Inlinks
AMP:Indexable Inlinks
AMP:Non-Indexable Inlinks
Structured Data:Contains Structured Data
Structured Data:Validation Errors
Structured Data:Validation Warnings
Structured Data:JSON-LD URLs
Structured Data:Microdata URLs
Structured Data:RDFa URLs
Sitemaps:URLs in Sitemap Inlinks
Sitemaps:Orphan URLs Inlinks
Sitemaps:Non-Indexable URLs in Sitemap Inlinks
Sitemaps:URLs in Multiple Sitemaps Inlinks
```
**Reports**:
```
Crawl Overview
Redirects:All Redirects
Redirects:Redirect Chains
Redirects:Redirect & Canonical Chains
Canonicals:Canonical Chains
Canonicals:Non-Indexable Canonicals
Pagination:Non-200 Pagination URLs
Pagination:Unlinked Pagination URLs
Hreflang:All hreflang URLs
Hreflang:Non-200 hreflang URLs
Hreflang:Unlinked hreflang URLs
Hreflang:Missing Return Links
Hreflang:Inconsistent Language & Region Return Links
Hreflang:Non Canonical Return Links
Hreflang:Noindex Return Links
Insecure Content
SERP Summary
Orphan Pages
Structured Data:Validation Errors & Warnings Summary
Structured Data:Validation Errors & Warnings
Structured Data:Google Rich Results Features Summary
Structured Data:Google Rich Results Features
HTTP Headers:HTTP Header Summary
Cookies:Cookie Summary
```
**Summary**:
```
Site Crawled
Date
Time
Total URLs Encountered
Total URLs Crawled
Total Internal blocked by robots.txt
Total External blocked by robots.txt
URLs Displayed
Total Internal URLs
Total External URLs
Total Internal Indexable URLs
Total Internal Non-Indexable URLs
JavaScript:All
JavaScript:Uses Old AJAX Crawling Scheme URLs
JavaScript:Uses Old AJAX Crawling Scheme Meta Fragment Tag
JavaScript:Page Title Only in Rendered HTML
JavaScript:Page Title Updated by JavaScript
JavaScript:H1 Only in Rendered HTML
JavaScript:H1 Updated by JavaScript
JavaScript:Meta Description Only in Rendered HTML
JavaScript:Meta Description Updated by JavaScript
JavaScript:Canonical Only in Rendered HTML
JavaScript:Canonical Mismatch
JavaScript:Noindex Only in Original HTML
JavaScript:Nofollow Only in Original HTML
JavaScript:Contains JavaScript Links
JavaScript:Contains JavaScript Content
JavaScript:Pages with Blocked Resources
H1:All
H1:Missing
H1:Duplicate
H1:Over X Characters
H1:Multiple
H2:All
H2:Missing
H2:Duplicate
H2:Over X Characters
H2:Multiple
Internal:All
Internal:HTML
Internal:JavaScript
Internal:CSS
Internal:Images
Internal:PDF
Internal:Flash
Internal:Other
Internal:Unknown
External:All
External:HTML
External:JavaScript
External:CSS
External:Images
External:PDF
External:Flash
External:Other
External:Unknown
AMP:All
AMP:Non-200 Response
AMP:Missing Non-AMP Return Link
AMP:Missing Canonical to Non-AMP
AMP:Non-Indexable Canonical
AMP:Indexable
AMP:Non-Indexable
AMP:Missing <html amp> Tag
AMP:Missing/Invalid <!doctype html> Tag
AMP:Missing <head> Tag
AMP:Missing <body> Tag
AMP:Missing Canonical
AMP:Missing/Invalid <meta charset> Tag
AMP:Missing/Invalid <meta viewport> Tag
AMP:Missing/Invalid AMP Script
AMP:Missing/Invalid AMP Boilerplate
AMP:Contains Disallowed HTML
AMP:Other Validation Errors
Canonicals:All
Canonicals:Contains Canonical
Canonicals:Self Referencing
Canonicals:Canonicalised
Canonicals:Missing
Canonicals:Multiple
Canonicals:Non-Indexable Canonical
Content:All
Content:Spelling Errors
Content:Grammar Errors
Content:Near Duplicates
Content:Exact Duplicates
Content:Low Content Pages
Custom Extraction:All
Custom Search:All
Directives:All
Directives:Index
Directives:Noindex
Directives:Follow
Directives:Nofollow
Directives:None
Directives:NoArchive
Directives:NoSnippet
Directives:Max-Snippet
Directives:Max-Image-Preview
Directives:Max-Video-Preview
Directives:NoODP
Directives:NoYDIR
Directives:NoImageIndex
Directives:NoTranslate
Directives:Unavailable_After
Directives:Refresh
Analytics:All
Analytics:Sessions Above 0
Analytics:Bounce Rate Above 70%
Analytics:No GA Data
Analytics:Non-Indexable with GA Data
Analytics:Orphan URLs
Search Console:All
Search Console:Clicks Above 0
Search Console:No GSC Data
Search Console:Non-Indexable with GSC Data
Search Console:Orphan URLs
Hreflang:All
Hreflang:Contains hreflang
Hreflang:Non-200 hreflang URLs
Hreflang:Unlinked hreflang URLs
Hreflang:Missing Return Links
Hreflang:Inconsistent Language & Region Return Links
Hreflang:Non-Canonical Return Links
Hreflang:Noindex Return Links
Hreflang:Incorrect Language & Region Codes
Hreflang:Multiple Entries
Hreflang:Missing Self Reference
Hreflang:Not Using Canonical
Hreflang:Missing X-Default
Hreflang:Missing
Images:All
Images:Over X KB
Images:Missing Alt Text
Images:Missing Alt Attribute
Images:Alt Text Over X Characters
Link Metrics:All
Meta Description:All
Meta Description:Missing
Meta Description:Duplicate
Meta Description:Over X Characters
Meta Description:Below X Characters
Meta Description:Over X Pixels
Meta Description:Below X Pixels
Meta Description:Multiple
Meta Keywords:All
Meta Keywords:Missing
Meta Keywords:Duplicate
Meta Keywords:Multiple
PageSpeed:All
PageSpeed:Eliminate Render-Blocking Resources
PageSpeed:Defer Offscreen Images
PageSpeed:Efficiently Encode Images
PageSpeed:Properly Size Images
PageSpeed:Minify CSS
PageSpeed:Minify JavaScript
PageSpeed:Reduce Unused CSS
PageSpeed:Reduce Unused JavaScript
PageSpeed:Serve Images in Next-Gen Formats
PageSpeed:Enable Text Compression
PageSpeed:Preconnect to Required Origins
PageSpeed:Reduce Server Response Times (TTFB)
PageSpeed:Avoid Multiple Page Redirects
PageSpeed:Preload Key Requests
PageSpeed:Use Video Formats for Animated Content
PageSpeed:Avoid Excessive DOM Size
PageSpeed:Reduce JavaScript Execution Time
PageSpeed:Serve Static Assets with an Efficient Cache Policy
PageSpeed:Minimize Main-Thread Work
PageSpeed:Ensure Text Remains Visible During Webfont Load
PageSpeed:Image Elements Do Not Have Explicit Width & Height
PageSpeed:Avoid Large Layout Shifts
PageSpeed:Avoid Serving Legacy JavaScript to Modern Browsers
PageSpeed:Request Errors
Pagination:All
Pagination:Contains Pagination
Pagination:First Page
Pagination:Paginated 2+ Pages
Pagination:Pagination URL Not in Anchor Tag
Pagination:Non-200 Pagination URLs
Pagination:Unlinked Pagination URLs
Pagination:Non-Indexable
Pagination:Multiple Pagination URLs
Pagination:Pagination Loop
Pagination:Sequence Error
Response Codes:All
Response Codes:Blocked by Robots.txt
Response Codes:Blocked Resource
Response Codes:No Response
Response Codes:Success (2xx)
Response Codes:Redirection (3xx)
Response Codes:Redirection (JavaScript)
Response Codes:Redirection (Meta Refresh)
Response Codes:Client Error (4xx)
Response Codes:Server Error (5xx)
Security:All
Security:HTTP URLs
Security:HTTPS URLs
Security:Mixed Content
Security:Form URL Insecure
Security:Form on HTTP URL
Security:Unsafe Cross-Origin Links
Security:Missing HSTS Header
Security:Bad Content Type
Security:Missing X-Content-Type-Options Header
Security:Missing X-Frame-Options Header
Security:Protocol-Relative Resource Links
Security:Missing Content-Security-Policy Header
Security:Missing Secure Referrer-Policy Header
Sitemaps:All
Sitemaps:URLs in Sitemap
Sitemaps:URLs not in Sitemap
Sitemaps:Orphan URLs
Sitemaps:Non-Indexable URLs in Sitemap
Sitemaps:URLs in Multiple Sitemaps
Sitemaps:XML Sitemap with over 50k URLs
Sitemaps:XML Sitemap over 50MB
Structured Data:All
Structured Data:Contains Structured Data
Structured Data:Missing
Structured Data:Validation Errors
Structured Data:Validation Warnings
Structured Data:Parse Errors
Structured Data:Microdata URLs
Structured Data:JSON-LD URLs
Structured Data:RDFa URLs
Page Titles:All
Page Titles:Missing
Page Titles:Duplicate
Page Titles:Over X Characters
Page Titles:Below X Characters
Page Titles:Over X Pixels
Page Titles:Below X Pixels
Page Titles:Same as H1
Page Titles:Multiple
URL:All
URL:Non ASCII Characters
URL:Underscores
URL:Uppercase
URL:Parameters
URL:Over X Characters
URL:Multiple Slashes
URL:Repetitive Path
URL:Contains Space
URL:Broken Bookmark
URL:Internal Search
Depth 1
Depth 2
Depth 3
Depth 4
Depth 5
Depth 6
Depth 7
Depth 8
Depth 9
Depth 10+
Top Inlinks 1 URL
Top Inlinks 1 Number of Inlinks
Top Inlinks 2 URL
Top Inlinks 2 Number of Inlinks
Top Inlinks 3 URL
Top Inlinks 3 Number of Inlinks
Top Inlinks 4 URL
Top Inlinks 4 Number of Inlinks
Top Inlinks 5 URL
Top Inlinks 5 Number of Inlinks
Top Inlinks 6 URL
Top Inlinks 6 Number of Inlinks
Top Inlinks 7 URL
Top Inlinks 7 Number of Inlinks
Top Inlinks 8 URL
Top Inlinks 8 Number of Inlinks
Top Inlinks 9 URL
Top Inlinks 9 Number of Inlinks
Top Inlinks 10 URL
Top Inlinks 10 Number of Inlinks
Top Inlinks 11 URL
Top Inlinks 11 Number of Inlinks
Top Inlinks 12 URL
Top Inlinks 12 Number of Inlinks
Top Inlinks 13 URL
Top Inlinks 13 Number of Inlinks
Top Inlinks 14 URL
Top Inlinks 14 Number of Inlinks
Top Inlinks 15 URL
Top Inlinks 15 Number of Inlinks
Top Inlinks 16 URL
Top Inlinks 16 Number of Inlinks
Top Inlinks 17 URL
Top Inlinks 17 Number of Inlinks
Top Inlinks 18 URL
Top Inlinks 18 Number of Inlinks
Top Inlinks 19 URL
Top Inlinks 19 Number of Inlinks
Top Inlinks 20 URL
Top Inlinks 20 Number of Inlinks
Response Times 0s to 1s
Response Times 1s to 2s
Response Times 2s to 3s
Response Times 3s to 4s
Response Times 4s to 5s
Response Times 5s to 6s
Response Times 6s to 7s
Response Times 7s to 8s
Response Times 8s to 9s
Response Times 10s or more
```
| github_jupyter |
## _*Using Qiskit Aqua for clique problems*_
This Qiskit Aqua Optimization notebook demonstrates how to use the VQE quantum algorithm to find a clique of a given size in a given graph.
The problem is defined as follows. A clique in a graph $G$ is a complete subgraph of $G$. That is, it is a subset of the vertices such that every two vertices in the subset are the endpoints of an edge in $G$. A maximal clique is a clique to which no more vertices can be added. A maximum clique is a clique that includes the largest possible number of vertices.
We will go through three examples to show (1) how to run the optimization in the non-programming way, (2) how to run the optimization in the programming way, (3) how to run the optimization with the VQE.
We will omit the details for the support of CPLEX, which are explained in other notebooks such as maxcut.
Note that the solution may not be unique.
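As a small hedged example (plain NumPy, only to illustrate the definition), checking whether a set of vertices forms a clique in a graph given by its adjacency matrix amounts to checking that every pair of vertices in the set is connected:
```
import numpy as np

# 4-node toy graph: {0, 1, 2} is a clique, {0, 1, 3} is not (edge (0, 3) is missing)
adj = np.array([[0, 1, 1, 0],
                [1, 0, 1, 1],
                [1, 1, 0, 0],
                [0, 1, 0, 0]])

def is_clique(adj, vertices):
    return all(adj[u, v] for u in vertices for v in vertices if u != v)

print(is_clique(adj, [0, 1, 2]))  # True
print(is_clique(adj, [0, 1, 3]))  # False
```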
### The problem and a brute-force method.
```
import numpy as np
from qiskit import Aer
from qiskit_aqua import run_algorithm
from qiskit_aqua.input import EnergyInput
from qiskit_aqua.translators.ising import clique
from qiskit_aqua.algorithms import ExactEigensolver
```
First, let us have a look at the graph, which is given in adjacency-matrix form.
```
K = 3 # K means the size of the clique
np.random.seed(100)
num_nodes = 5
w = clique.random_graph(num_nodes, edge_prob=0.8, weight_range=10)
print(w)
```
Let us try a brute-force method. Basically, we exhaustively try all binary assignments. In each binary assignment, the entry of a vertex is either 0 (the vertex is not in the clique) or 1 (the vertex is in the clique). We print the binary assignment that satisfies the definition of a clique (note that the size is specified as $K$).
```
def brute_force():
    # brute-force way: try every possible assignment!
    def bitfield(n, L):
        result = np.binary_repr(n, L)
        return [int(digit) for digit in result]
    L = num_nodes  # length of the bitstring that represents the assignment
    n_assignments = 2**L
    has_sol = False
    cur = None
    for i in range(n_assignments):
        cur = bitfield(i, L)
        cur_v = clique.satisfy_or_not(np.array(cur), w, K)
        if cur_v:
            has_sol = True
            break
    return has_sol, cur
has_sol, sol = brute_force()
if has_sol:
print("solution is ", sol)
else:
print("no solution found for K=", K)
```
### Part I: run the optimization in the non-programming way
```
qubit_op, offset = clique.get_clique_qubitops(w, K)
algo_input = EnergyInput(qubit_op)
params = {
'problem': {'name': 'ising'},
'algorithm': {'name': 'ExactEigensolver'}
}
result = run_algorithm(params, algo_input)
x = clique.sample_most_likely(len(w), result['eigvecs'][0])
ising_sol = clique.get_graph_solution(x)
if clique.satisfy_or_not(ising_sol, w, K):
print("solution is", ising_sol)
else:
print("no solution found for K=", K)
```
### Part II: run the optimization in the programming way
```
algo = ExactEigensolver(algo_input.qubit_op, k=1, aux_operators=[])
result = algo.run()
x = clique.sample_most_likely(len(w), result['eigvecs'][0])
ising_sol = clique.get_graph_solution(x)
if clique.satisfy_or_not(ising_sol, w, K):
print("solution is", ising_sol)
else:
print("no solution found for K=", K)
```
### Part III: run the optimization with the VQE
```
algorithm_cfg = {
'name': 'VQE',
'operator_mode': 'matrix'
}
optimizer_cfg = {
'name': 'COBYLA'
}
var_form_cfg = {
'name': 'RY',
'depth': 5,
'entanglement': 'linear'
}
params = {
'problem': {'name': 'ising', 'random_seed': 10598},
'algorithm': algorithm_cfg,
'optimizer': optimizer_cfg,
'variational_form': var_form_cfg
}
backend = Aer.get_backend('statevector_simulator')
result = run_algorithm(params, algo_input, backend=backend)
x = clique.sample_most_likely(len(w), result['eigvecs'][0])
ising_sol = clique.get_graph_solution(x)
if clique.satisfy_or_not(ising_sol, w, K):
print("solution is", ising_sol)
else:
print("no solution found for K=", K)
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
```
# 1. Decision trees for classification (continued)
In the previous session we covered the idea behind decision trees:

Now let's figure out **how the split in each node is made**, i.e. how the **model training** stage works. There are at least two reasons to understand this: first, it will let us solve classification problems with 3 or more classes; second, it will let us compute feature *importance* in a trained model.
To start, let's look at what kinds of decision trees there are.
----
Generally speaking, a decision tree **does not have to be binary**; in practice, however, binary trees are used, because for any non-binary decision tree **an equivalent binary one can be built** (at the cost of a deeper tree).
### 1. Decision trees use a simple one-dimensional predicate to split the objects
This means that in each node the objects are split (and two new child nodes are created) **on a single** feature:
*All objects whose value of some feature is below a threshold go to one node, and the rest go to the other:*
$$
[x_j < t]
$$
Strictly speaking, this is not required: in each individual node one could fit any model (for example, logistic regression or KNN) that looks at several features at once.
### 2. Split quality
We previously discussed a simple split-quality criterion (for **choosing the threshold**): the number of errors (1 - accuracy).
In practice two criteria are used: the Gini impurity index and information gain.
**Gini index**
$$
I_{Gini} = 1 - \sum_i^K p_i^2
$$
where $K$ is the number of classes and $p_i = \frac{|n_i|}{n}$ is the fraction of objects of class $i$ in the given node
**Entropy**
$$
H(p) = - \sum_i^K p_i\log(p_i)
$$
**Information gain**
$$
IG = H(\text{parent}) - \sum_{c \in \text{children}} \frac{n_c}{n} H(\text{child}_c)
$$
#### The split is made on the threshold and the feature for which the weighted average of the quality criterion over the child nodes is smallest.
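A minimal sketch of this procedure (not how sklearn implements it, but the same idea): for a single feature, loop over candidate thresholds and keep the one with the smallest weighted child impurity.
```
def gini(y):
    # Gini impurity of a label vector
    _, counts = np.unique(y, return_counts=True)
    p = counts / len(y)
    return 1 - (p ** 2).sum()

def best_split(x, y):
    # exhaustive search over thresholds of a single feature x
    best_t, best_score = None, np.inf
    for t in np.unique(x)[1:]:
        left, right = y[x < t], y[x >= t]
        score = (len(left) * gini(left) + len(right) * gini(right)) / len(y)
        if score < best_score:
            best_t, best_score = t, score
    return best_t, best_score

x = np.array([1., 2., 3., 10., 11., 12.])
y = np.array([0, 0, 0, 1, 1, 1])
print(best_split(x, y))  # the threshold 10.0 separates the classes perfectly
```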
### 3. Stopping criteria
We have already discussed decision-tree parameters such as the minimum number of objects in a leaf and the minimum number of objects a node must contain in order to be split in two. Another criterion is tree depth. Other options are possible:
* Limit on the number of objects in a leaf
* Limit on the number of objects a node must contain in order to be split
* Limit on tree depth
* Limit on the minimum increase in entropy or information gain from a split
* Stop if all objects in a leaf belong to the same class
In the previous lecture we discussed a technique called **pruning**, an alternative to stopping criteria: an overfitted tree is built first and then simplified in some way. In practice, for a number of reasons, stopping criteria are used more often than pruning.
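As a hedged illustration, here is how the stopping criteria listed above map onto `DecisionTreeClassifier` hyperparameters in sklearn (the values are arbitrary); post-hoc cost-complexity pruning is also available in recent sklearn versions via the `ccp_alpha` parameter.
```
from sklearn.tree import DecisionTreeClassifier

# arbitrary illustrative values; each argument corresponds to one stopping criterion
tree = DecisionTreeClassifier(
    min_samples_leaf=5,          # minimum number of objects in a leaf
    min_samples_split=10,        # minimum number of objects in a node for it to be split
    max_depth=7,                 # maximum tree depth
    min_impurity_decrease=1e-3,  # minimum impurity decrease required for a split
)
```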
For more details see https://github.com/esokolov/ml-course-hse/blob/master/2018-fall/lecture-notes/lecture07-trees.pdf
On the specifics of splitting continuous features:
* http://kevinmeurer.com/a-simple-guide-to-entropy-based-discretization/
* http://clear-lines.com/blog/post/Discretizing-a-continuous-variable-using-Entropy.aspx
---
## 1.1. Evaluating split quality in a node
```
def gini_impurity(y_current):
    # Gini impurity: 1 - sum_i p_i^2
    n = y_current.shape[0]
    val, count = np.unique(y_current, return_counts=True)
    gini = 1 - ((count/n)**2).sum()
    return gini
def entropy(y_current):
    # Shannon entropy: H(p) = -sum_i p_i * log(p_i)
    n = y_current.shape[0]
    val, count = np.unique(y_current, return_counts=True)
    p = count/n
    return -p.dot(np.log(p))
n = 100
Y_example = np.zeros((100,100))
for i in range(100):
for j in range(i, 100):
Y_example[i, j] = 1
gini = [gini_impurity(y) for y in Y_example]
ig = [entropy(y) for y in Y_example]
plt.figure(figsize=(7,7))
plt.plot(np.linspace(0,1,100), gini, label='Index Gini');
plt.plot(np.linspace(0,1,100), ig, label ='Entropy');
plt.legend()
plt.xlabel('Fraction of examples\n of the positive class')
plt.ylabel('Value of the optimized\n criterion');
```
## 1.2. A decision tree in action
**The Gini index** and **information gain** are measures of how balanced a vector is (how homogeneous the objects in a set are). Heterogeneity is maximal when the classes are represented in equal proportions; homogeneity is maximal when the set contains objects of a single class.
By splitting a set of objects into two subsets, we aim to reduce the heterogeneity within each subset.
Let's look at the Fisher Iris example.
### Fisher's Iris data
```
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
model = DecisionTreeClassifier()
model = model.fit(iris.data, iris.target)
feature_names = ['sepal length', 'sepal width', 'petal length', 'petal width']
target_names = ['setosa', 'versicolor', 'virginica']
model.feature_importances_
np.array(model.decision_path(iris.data).todense())[0]
np.array(model.decision_path(iris.data).todense())[90]
iris.data[0]
model.predict(iris.data)
model.tree_.node_count
```
### Digits. Interpretability
```
from sklearn.datasets import load_digits
X, y = load_digits(n_class=2, return_X_y=True)
plt.figure(figsize=(12,12))
for i in range(9):
ax = plt.subplot(3,3,i+1)
ax.imshow(X[i].reshape(8,8), cmap='gray')
from sklearn.metrics import accuracy_score
model = DecisionTreeClassifier()
model.fit(X, y)
y_pred = model.predict(X)
print(accuracy_score(y, y_pred))
print(X.shape)
np.array(model.decision_path(X).todense())[0]
model.feature_importances_
plt.imshow(model.feature_importances_.reshape(8,8));
from sklearn.tree import export_graphviz
export_graphviz(model, out_file='tree.dot', filled=True)
# #sudo apt-get install graphviz
# !dot -Tpng 'tree.dot' -o 'tree.png'
# 
np.array(model.decision_path(X).todense())[0]
plt.imshow(X[0].reshape(8,8))
```
## 2.3. Decision trees generalize easily to multiclass classification
### Handwritten digits example
```
X, y = load_digits(n_class=10, return_X_y=True)
plt.figure(figsize=(12,12))
for i in range(9):
ax = plt.subplot(3,3,i+1)
ax.imshow(X[i].reshape(8,8), cmap='gray')
ax.set_title(y[i])
ax.set_xticks([])
ax.set_yticks([])
model = DecisionTreeClassifier()
model.fit(X, y)
y_pred = model.predict(X)
print(accuracy_score(y, y_pred))
plt.imshow(model.feature_importances_.reshape(8,8));
model.feature_importances_
```
### Question: where does feature importance come from?
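One hedged sketch of the answer (not sklearn's exact code, and assuming `model` and `X` are the fitted tree and data from the digits example above): impurity-based importance adds up, for every node that splits on a feature, the weighted impurity decrease produced by that split, and then normalizes.
```
def mean_decrease_impurity(model, n_features):
    # approximate reconstruction of impurity-based feature importance
    tree = model.tree_
    importances = np.zeros(n_features)
    w = tree.weighted_n_node_samples
    for node in range(tree.node_count):
        left, right = tree.children_left[node], tree.children_right[node]
        if left == -1:  # leaf node: no split, no contribution
            continue
        decrease = (w[node] * tree.impurity[node]
                    - w[left] * tree.impurity[left]
                    - w[right] * tree.impurity[right])
        importances[tree.feature[node]] += decrease
    return importances / importances.sum()

print(mean_decrease_impurity(model, X.shape[1])[:8])
print(model.feature_importances_[:8])  # should closely match
```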
## 2.4. An example where a decision tree builds a very complex decision boundary
The example is taken from https://habr.com/ru/company/ods/blog/322534/#slozhnyy-sluchay-dlya-derevev-resheniy .
As we recall, decision trees use a one-dimensional predicate to split the set of objects.
This means that if the data are poorly separable along **each** individual feature taken on its own, the resulting decision rule can turn out to be very complex.
```
from sklearn.tree import DecisionTreeClassifier
def form_linearly_separable_data(n=500, x1_min=0, x1_max=30, x2_min=0, x2_max=30):
data, target = [], []
for i in range(n):
x1, x2 = np.random.randint(x1_min, x1_max), np.random.randint(x2_min, x2_max)
if np.abs(x1 - x2) > 0.5:
data.append([x1, x2])
target.append(np.sign(x1 - x2))
return np.array(data), np.array(target)
X, y = form_linearly_separable_data()
plt.figure(figsize=(10,10))
plt.scatter(X[:, 0], X[:, 1], c=y, cmap='autumn');
```
Let's see what the data look like when projected onto a single axis.
```
plt.figure(figsize=(15,5))
ax1 = plt.subplot(1,2,1)
ax1.set_title('Projection onto the $X_0$ axis')
ax1.hist(X[y==1, 0], alpha=.3);
ax1.hist(X[y==-1, 0], alpha=.6);
ax2 = plt.subplot(1,2,2)
ax2.set_title('Projection onto the $X_1$ axis')
ax2.hist(X[y==1, 1], alpha=.3);
ax2.hist(X[y==-1, 1], alpha=.6);
def get_grid(data, eps=0.01):
x_min, x_max = data[:, 0].min() - 1, data[:, 0].max() + 1
y_min, y_max = data[:, 1].min() - 1, data[:, 1].max() + 1
return np.meshgrid(np.arange(x_min, x_max, eps),
np.arange(y_min, y_max, eps))
tree = DecisionTreeClassifier(random_state=17).fit(X, y)
xx, yy = get_grid(X, eps=.05)
predicted = tree.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
plt.figure(figsize=(10,10))
plt.pcolormesh(xx, yy, predicted, cmap='autumn', alpha=0.3)
plt.scatter(X[y==1, 0], X[y==1, 1], marker='x', s=100, cmap='autumn', linewidth=1.5)
plt.scatter(X[y==-1, 0], X[y==-1, 1], marker='o', s=100, cmap='autumn', edgecolors='k',linewidth=1.5)
plt.title('Easy task. The decision tree overcomplicates everything');
# export_graphviz(tree, out_file='complex_tree.dot', filled=True)
# !dot -Tpng 'complex_tree.dot' -o 'complex_tree.png'
```
## 2.5. Decision trees for regression (briefly)
See sklearn.tree.DecisionTreeRegressor
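A hedged minimal sketch of a regression tree: fit a noisy sine curve and look at the piecewise-constant prediction (in regression, split quality is measured by the variance/MSE inside the leaves).
```
from sklearn.tree import DecisionTreeRegressor

rng = np.random.RandomState(0)
X_reg = np.sort(5 * rng.rand(200, 1), axis=0)
y_reg = np.sin(X_reg).ravel() + 0.1 * rng.randn(200)

reg = DecisionTreeRegressor(max_depth=4).fit(X_reg, y_reg)
X_grid = np.linspace(0, 5, 500).reshape(-1, 1)

plt.figure(figsize=(7, 4))
plt.scatter(X_reg, y_reg, s=10, label='data')
plt.plot(X_grid, reg.predict(X_grid), 'r', label='tree prediction')
plt.legend();
```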
# 3. Tree ensembles. Random forest.
What if we have several classifiers (each of them possibly not very *smart*) that make errors on different objects?
Then, if we use the *mode* of their predictions, we can hope for better predictive power.
### Idea 1
How do we get models that make errors in different places?
Let's take *weak* trees but train them on **different subsets of features**!
### Idea 2
How do we get models that make errors in different places?
Let's take *weak* trees but train them on **different subsamples of objects**!
### The result: Random forest.
See sklearn.ensemble.RandomForestClassifier
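A hedged minimal sketch of both ideas (reloading the digits data and reusing the imports from the cells above): train shallow trees on random subsets of objects and features, aggregate their predictions by majority vote, and compare with the ready-made `RandomForestClassifier`. Accuracy is measured on the training data only to keep the example short.
```
X, y = load_digits(n_class=10, return_X_y=True)

rng = np.random.RandomState(0)
n_trees, n_sub_features = 25, int(np.sqrt(X.shape[1]))
all_preds = []
for _ in range(n_trees):
    obj_idx = rng.choice(len(X), size=len(X), replace=True)                # idea 2: random objects (bootstrap)
    feat_idx = rng.choice(X.shape[1], size=n_sub_features, replace=False)  # idea 1: random features
    tree = DecisionTreeClassifier(max_depth=5)
    tree.fit(X[obj_idx][:, feat_idx], y[obj_idx])
    all_preds.append(tree.predict(X[:, feat_idx]))
all_preds = np.array(all_preds)  # shape: (n_trees, n_objects)
majority_vote = np.array([np.bincount(col).argmax() for col in all_preds.T])
print("manual ensemble:", accuracy_score(y, majority_vote))

from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100, random_state=0).fit(X, y)
print("RandomForestClassifier:", accuracy_score(y, rf.predict(X)))
```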
| github_jupyter |
```
import re
import numpy as np
import pandas as pd
import collections
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
import tensorflow as tf
from sklearn.model_selection import train_test_split
from unidecode import unidecode
from tqdm import tqdm
import time
rules_normalizer = {
'experience': 'pengalaman',
'bagasi': 'bagasi',
'kg': 'kampung',
'kilo': 'kilogram',
'g': 'gram',
'grm': 'gram',
'k': 'okay',
'abgkat': 'abang dekat',
'abis': 'habis',
'ade': 'ada',
'adoi': 'aduh',
'adoii': 'aduhh',
'aerodarat': 'kapal darat',
'agkt': 'angkat',
'ahh': 'ah',
'ailior': 'air liur',
'airasia': 'air asia x',
'airasiax': 'penerbangan',
'airline': 'penerbangan',
'airlines': 'penerbangan',
'airport': 'lapangan terbang',
'airpot': 'lapangan terbang',
'aje': 'sahaja',
'ajelah': 'sahajalah',
'ajer': 'sahaja',
'ak': 'aku',
'aq': 'aku',
'all': 'semua',
'ambik': 'ambil',
'amek': 'ambil',
'amer': 'amir',
'amik': 'ambil',
'ana': 'saya',
'angkt': 'angkat',
'anual': 'tahunan',
'apapun': 'apa pun',
'ape': 'apa',
'arab': 'arab',
'area': 'kawasan',
'aritu': 'hari itu',
'ask': 'tanya',
'astro': 'astro',
'at': 'pada',
'attitude': 'sikap',
'babi': 'khinzir',
'back': 'belakang',
'bag': 'beg',
'bang': 'abang',
'bangla': 'bangladesh',
'banyk': 'banyak',
'bard': 'pujangga',
'bargasi': 'bagasi',
'bawak': 'bawa',
'bawanges': 'bawang',
'be': 'jadi',
'behave': 'berkelakuan baik',
'belagak': 'berlagak',
'berdisiplin': 'berdisplin',
'berenti': 'berhenti',
'beskal': 'basikal',
'bff': 'rakan karib',
'bg': 'bagi',
'bgi': 'bagi',
'biase': 'biasa',
'big': 'besar',
'bike': 'basikal',
'bile': 'bila',
'binawe': 'binatang',
'bini': 'isteri',
'bkn': 'bukan',
'bla': 'bila',
'blom': 'belum',
'bnyak': 'banyak',
'body': 'tubuh',
'bole': 'boleh',
'boss': 'bos',
'bowling': 'boling',
'bpe': 'berapa',
'brand': 'jenama',
'brg': 'barang',
'briefing': 'taklimat',
'brng': 'barang',
'bro': 'abang',
'bru': 'baru',
'bruntung': 'beruntung',
'bsikal': 'basikal',
'btnggjwb': 'bertanggungjawab',
'btul': 'betul',
'buatlh': 'buatlah',
'buh': 'letak',
'buka': 'buka',
'but': 'tetapi',
'bwk': 'bawa',
'by': 'dengan',
'byr': 'bayar',
'bz': 'sibuk',
'camera': 'kamera',
'camni': 'macam ini',
'cane': 'macam mana',
'cant': 'tak boleh',
'carakerja': 'cara kerja',
'care': 'jaga',
'cargo': 'kargo',
'cctv': 'kamera litar tertutup',
'celako': 'celaka',
'cer': 'cerita',
'cheap': 'murah',
'check': 'semak',
'ciput': 'sedikit',
'cite': 'cerita',
'citer': 'cerita',
'ckit': 'sikit',
'ckp': 'cakap',
'class': 'kelas',
'cm': 'macam',
'cmni': 'macam ini',
'cmpak': 'campak',
'committed': 'komited',
'company': 'syarikat',
'complain': 'aduan',
'corn': 'jagung',
'couldnt': 'tak boleh',
'cr': 'cari',
'crew': 'krew',
'cube': 'cuba',
'cuma': 'cuma',
'curinyaa': 'curinya',
'cust': 'pelanggan',
'customer': 'pelanggan',
'd': 'di',
'da': 'dah',
'dn': 'dan',
'dahh': 'dah',
'damaged': 'rosak',
'dapek': 'dapat',
'day': 'hari',
'dazrin': 'dazrin',
'dbalingnya': 'dibalingnya',
'de': 'ada',
'deep': 'dalam',
'deliberately': 'sengaja',
'depa': 'mereka',
'dessa': 'desa',
'dgn': 'dengan',
'dh': 'dah',
'didunia': 'di dunia',
'diorang': 'mereka',
'diorng': 'mereka',
'direct': 'secara terus',
'diving': 'junam',
'dkt': 'dekat',
'dlempar': 'dilempar',
'dlm': 'dalam',
'dlt': 'padam',
'dlu': 'dulu',
'done': 'siap',
'dont': 'jangan',
'dorg': 'mereka',
'dpermudhkn': 'dipermudahkan',
'dpt': 'dapat',
'dr': 'dari',
'dri': 'dari',
'dsb': 'dan sebagainya',
'dy': 'dia',
'educate': 'mendidik',
'ensure': 'memastikan',
'everything': 'semua',
'ewahh': 'wah',
'expect': 'sangka',
'fb': 'facebook',
'fired': 'pecat',
'first': 'pertama',
'fkr': 'fikir',
'flight': 'kapal terbang',
'for': 'untuk',
'free': 'percuma',
'friend': 'kawan',
'fyi': 'untuk pengetahuan anda',
'gantila': 'gantilah',
'gantirugi': 'ganti rugi',
'gentlemen': 'lelaki budiman',
'gerenti': 'jaminan',
'gile': 'gila',
'gk': 'juga',
'gnti': 'ganti',
'go': 'pergi',
'gomen': 'kerajaan',
'goment': 'kerajaan',
'good': 'baik',
'ground': 'tanah',
'guarno': 'macam mana',
'hampa': 'mereka',
'hampeh': 'teruk',
'hanat': 'jahanam',
'handle': 'kawal',
'handling': 'kawalan',
'hanta': 'hantar',
'haritu': 'hari itu',
'hate': 'benci',
'have': 'ada',
'hawau': 'celaka',
'henpon': 'telefon',
'heran': 'hairan',
'him': 'dia',
'his': 'dia',
'hmpa': 'mereka',
'hntr': 'hantar',
'hotak': 'otak',
'hr': 'hari',
'i': 'saya',
'hrga': 'harga',
'hrp': 'harap',
'hu': 'sedih',
'humble': 'merendah diri',
'ibon': 'ikon',
'ichi': 'inci',
'idung': 'hidung',
'if': 'jika',
'ig': 'instagram',
'iklas': 'ikhlas',
'improve': 'menambah baik',
'in': 'masuk',
'isn t': 'tidak',
'isyaallah': 'insyallah',
'ja': 'sahaja',
'japan': 'jepun',
'jd': 'jadi',
'je': 'saja',
'jee': 'saja',
'jek': 'saja',
'jepun': 'jepun',
'jer': 'saja',
'jerr': 'saja',
'jez': 'saja',
'jg': 'juga',
'jgk': 'juga',
'jgn': 'jangan',
'jgnla': 'janganlah',
'jibake': 'celaka',
'jjur': 'jujur',
'job': 'kerja',
'jobscope': 'skop kerja',
'jogja': 'jogjakarta',
'jpam': 'jpam',
'jth': 'jatuh',
'jugak': 'juga',
'ka': 'ke',
'kalo': 'kalau',
'kalu': 'kalau',
'kang': 'nanti',
'kantoi': 'temberang',
'kasi': 'beri',
'kat': 'dekat',
'kbye': 'ok bye',
'kearah': 'ke arah',
'kecik': 'kecil',
'keja': 'kerja',
'keje': 'kerja',
'kejo': 'kerja',
'keksongan': 'kekosongan',
'kemana': 'ke mana',
'kene': 'kena',
'kenekan': 'kenakan',
'kesah': 'kisah',
'ketempat': 'ke tempat',
'kije': 'kerja',
'kijo': 'kerja',
'kiss': 'cium',
'kite': 'kita',
'kito': 'kita',
'kje': 'kerja',
'kjr': 'kerja',
'kk': 'okay',
'kmi': 'kami',
'kt': 'kat',
'tlg': 'tolong',
'kl': 'kuala lumpur',
'klai': 'kalau',
'klau': 'kalau',
'klia': 'klia',
'klo': 'kalau',
'klu': 'kalau',
'kn': 'kan',
'knapa': 'kenapa',
'kne': 'kena',
'ko': 'kau',
'kompom': 'sah',
'korang': 'kamu semua',
'korea': 'korea',
'korg': 'kamu semua',
'kot': 'mungkin',
'krja': 'kerja',
'ksalahan': 'kesalahan',
'kta': 'kita',
'kuar': 'keluar',
'kut': 'mungkin',
'la': 'lah',
'laa': 'lah',
'lahabau': 'celaka',
'lahanat': 'celaka',
'lainda': 'lain dah',
'lak': 'pula',
'last': 'akhir',
'le': 'lah',
'leader': 'ketua',
'leave': 'pergi',
'ler': 'lah',
'less': 'kurang',
'letter': 'surat',
'lg': 'lagi',
'lgi': 'lagi',
'lngsong': 'langsung',
'lol': 'hehe',
'lorr': 'lah',
'low': 'rendah',
'lps': 'lepas',
'luggage': 'bagasi',
'lumbe': 'lumba',
'lyak': 'layak',
'maap': 'maaf',
'maapkan': 'maafkan',
'mahai': 'mahal',
'mampos': 'mampus',
'mart': 'kedai',
'mau': 'mahu',
'mcm': 'macam',
'mcmtu': 'macam itu',
'memerlukn': 'memerlukan',
'mengembirakan': 'menggembirakan',
'mengmbilnyer': 'mengambilnya',
'mengtasi': 'mengatasi',
'mg': 'memang',
'mihak': 'memihak',
'min': 'admin',
'mingu': 'minggu',
'mintak': 'minta',
'mjtuhkn': 'menjatuhkan',
'mkyong': 'mak yong',
'mlibatkn': 'melibatkan',
'mmg': 'memang',
'mmnjang': 'memanjang',
'mmpos': 'mampus',
'mn': 'mana',
'mna': 'mana',
'mntak': 'minta',
'mntk': 'minta',
'mnyusun': 'menyusun',
'mood': 'suasana',
'most': 'paling',
'mr': 'tuan',
'msa': 'masa',
'msia': 'malaysia',
'mst': 'mesti',
'mu': 'awak',
'much': 'banyak',
'muko': 'muka',
'mum': 'emak',
'n': 'dan',
'nah': 'nah',
'nanny': 'nenek',
'napo': 'kenapa',
'nati': 'nanti',
'ngan': 'dengan',
'ngn': 'dengan',
'ni': 'ini',
'nie': 'ini',
'nii': 'ini',
'nk': 'nak',
'nmpk': 'nampak',
'nye': 'nya',
'ofis': 'pejabat',
'ohh': 'oh',
'oii': 'hoi',
'one': 'satu',
'online': 'dalam talian',
'or': 'atau',
'org': 'orang',
'orng': 'orang',
'otek': 'otak',
'p': 'pergi',
'paid': 'dah bayar',
'palabana': 'kepala otak',
'pasni': 'lepas ini',
'passengers': 'penumpang',
'passengger': 'penumpang',
'pastu': 'lepas itu',
'pd': 'pada',
'pegi': 'pergi',
'pekerje': 'pekerja',
'pekrja': 'pekerja',
'perabih': 'perabis',
'perkerja': 'pekerja',
'pg': 'pergi',
'phuii': 'puih',
'pikir': 'fikir',
'pilot': 'juruterbang',
'pk': 'fikir',
'pkerja': 'pekerja',
'pkerjaan': 'pekerjaan',
'pki': 'pakai',
'please': 'tolong',
'pls': 'tolong',
'pn': 'pun',
'pnh': 'pernah',
'pnt': 'penat',
'pnya': 'punya',
'pon': 'pun',
'priority': 'keutamaan',
'properties': 'harta benda',
'ptugas': 'petugas',
'pub': 'kelab malam',
'pulak': 'pula',
'puye': 'punya',
'pwrcuma': 'percuma',
'pyahnya': 'payahnya',
'quality': 'kualiti',
'quit': 'keluar',
'ramly': 'ramly',
'rege': 'harga',
'reger': 'harga',
'report': 'laporan',
'resigned': 'meletakkan jawatan',
'respect': 'hormat',
'rizal': 'rizal',
'rosak': 'rosak',
'rosok': 'rosak',
'rse': 'rasa',
'sacked': 'buang',
'sado': 'tegap',
'salute': 'sanjung',
'sam': 'sama',
'same': 'sama',
'samp': 'sampah',
'sbb': 'sebab',
'sbgai': 'sebagai',
'sblm': 'sebelum',
'sblum': 'sebelum',
'sbnarnya': 'sebenarnya',
'sbum': 'sebelum',
'sdg': 'sedang',
'sebb': 'sebab',
'sebijik': 'sebiji',
'see': 'lihat',
'seen': 'dilihat',
'selangor': 'selangor',
'selfie': 'swafoto',
'sempoi': 'cantik',
'senaraihitam': 'senarai hitam',
'seorg': 'seorang',
'service': 'perkhidmatan',
'sgt': 'sangat',
'shared': 'kongsi',
'shirt': 'kemeja',
'shut': 'tutup',
'sib': 'nasib',
'skali': 'sekali',
'sket': 'sikit',
'sma': 'sama',
'smoga': 'semoga',
'smpoi': 'cantik',
'sndiri': 'sendiri',
'sndr': 'sendiri',
'sndri': 'sendiri',
'sne': 'sana',
'so': 'jadi',
'sop': 'tatacara pengendalian piawai',
'sorang': 'seorang',
'spoting': 'pembintikan',
'sronok': 'seronok',
'ssh': 'susah',
'staff': 'staf',
'standing': 'berdiri',
'start': 'mula',
'steady': 'mantap',
'stiap': 'setiap',
'stress': 'stres',
'student': 'pelajar',
'study': 'belajar',
'studycase': 'kajian kes',
'sure': 'pasti',
'sykt': 'syarikat',
'tah': 'entah',
'taik': 'tahi',
'takan': 'tak akan',
'takat': 'setakat',
'takde': 'tak ada',
'takkan': 'tak akan',
'taknak': 'tak nak',
'tang': 'tentang',
'tanggungjawab': 'bertanggungjawab',
'taraa': 'sementara',
'tau': 'tahu',
'tbabit': 'terbabit',
'team': 'pasukan',
'terbaekk': 'terbaik',
'teruknye': 'teruknya',
'tgk': 'tengok',
'that': 'itu',
'thinking': 'fikir',
'those': 'itu',
'time': 'masa',
'tk': 'tak',
'tnggongjwb': 'tanggungjawab',
'tngok': 'tengok',
'tngu': 'tunggu',
'to': 'kepada',
'tosak': 'rosak',
'tp': 'tapi',
'tpi': 'tapi',
'tpon': 'telefon',
'transfer': 'pindah',
'trgelak': 'tergelak',
'ts': 'tan sri',
'tstony': 'tan sri tony',
'tu': 'itu',
'tuh': 'itu',
'tula': 'itulah',
'umeno': 'umno',
'unfortunately': 'malangnya',
'unhappy': 'tidak gembira',
'up': 'naik',
'upkan': 'naikkan',
'ur': 'awak',
'utk': 'untuk',
'very': 'sangat',
'viral': 'tular',
'vote': 'undi',
'warning': 'amaran',
'warranty': 'waranti',
'wassap': 'whatsapp',
'wat': 'apa',
'weii': 'wei',
'well': 'maklumlah',
'win': 'menang',
'with': 'dengan',
'wt': 'buat',
'x': 'tak',
'tw': 'tahu',
'ye': 'ya',
'yee': 'ya',
'yg': 'yang',
'yng': 'yang',
'you': 'awak',
'your': 'awak',
'sakai': 'selekeh',
'rmb': 'billion ringgit',
'rmj': 'juta ringgit',
'rmk': 'ribu ringgit',
'rm': 'ringgit',
}
permulaan = [
'bel',
'se',
'ter',
'men',
'meng',
'mem',
'memper',
'di',
'pe',
'me',
'ke',
'ber',
'pen',
'per',
]
hujung = ['kan', 'kah', 'lah', 'tah', 'nya', 'an', 'wan', 'wati', 'ita']
def naive_stemmer(word):
assert isinstance(word, str), 'input must be a string'
hujung_result = [e for e in hujung if word.endswith(e)]
if len(hujung_result):
hujung_result = max(hujung_result, key = len)
if len(hujung_result):
word = word[: -len(hujung_result)]
permulaan_result = [e for e in permulaan if word.startswith(e)]
if len(permulaan_result):
permulaan_result = max(permulaan_result, key = len)
if len(permulaan_result):
word = word[len(permulaan_result) :]
return word
def build_dataset(words, n_words):
count = [['GO', 0], ['PAD', 1], ['EOS', 2], ['UNK', 3]]
counter = collections.Counter(words).most_common(n_words)
count.extend(counter)
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 3)
if index == 3:  # 3 is the UNK token defined in count above
unk_count += 1
data.append(index)
count[3][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
def classification_textcleaning(string):
string = re.sub(
r'http\S+|www.\S+',
'',
' '.join(
[i for i in string.split() if i.find('#') < 0 and i.find('@') < 0]
),
)
string = unidecode(string).replace('.', ' . ').replace(',', ' , ')
string = re.sub('[^A-Za-z ]+', ' ', string)
string = re.sub(r'[ ]+', ' ', string.lower()).strip()
string = [rules_normalizer.get(w, w) for w in string.split()]
string = [naive_stemmer(word) for word in string]
return ' '.join([word for word in string if len(word) > 1])
def str_idx(corpus, dic, maxlen, UNK = 3):
X = np.zeros((len(corpus), maxlen))
for i in range(len(corpus)):
for no, k in enumerate(corpus[i].split()[:maxlen][::-1]):
X[i, -1 - no] = dic.get(k, UNK)
return X
classification_textcleaning('kerajaan sebenarnya sangat bencikan rakyatnya, minyak naik dan segalanya')
df = pd.read_csv('sentiment-data-v2.csv')
Y = LabelEncoder().fit_transform(df.label)
with open('polarity-negative-translated.txt','r') as fopen:
texts = fopen.read().split('\n')
labels = [0] * len(texts)
with open('polarity-positive-translated.txt','r') as fopen:
positive_texts = fopen.read().split('\n')
labels += [1] * len(positive_texts)
texts += positive_texts
texts += df.iloc[:,1].tolist()
labels += Y.tolist()
assert len(labels) == len(texts)
import json
with open('bm-amazon.json') as fopen:
amazon = json.load(fopen)
with open('bm-imdb.json') as fopen:
imdb = json.load(fopen)
with open('bm-yelp.json') as fopen:
yelp = json.load(fopen)
texts += amazon['negative']
labels += [0] * len(amazon['negative'])
texts += amazon['positive']
labels += [1] * len(amazon['positive'])
texts += imdb['negative']
labels += [0] * len(imdb['negative'])
texts += imdb['positive']
labels += [1] * len(imdb['positive'])
texts += yelp['negative']
labels += [0] * len(yelp['negative'])
texts += yelp['positive']
labels += [1] * len(yelp['positive'])
import os
for i in [i for i in os.listdir('negative') if 'Store' not in i]:
with open('negative/'+i) as fopen:
a = json.load(fopen)
texts += a
labels += [0] * len(a)
import os
for i in [i for i in os.listdir('positive') if 'Store' not in i]:
with open('positive/'+i) as fopen:
a = json.load(fopen)
texts += a
labels += [1] * len(a)
for i in range(len(texts)):
texts[i] = classification_textcleaning(texts[i])
concat = ' '.join(texts).split()
vocabulary_size = len(list(set(concat)))
data, count, dictionary, rev_dictionary = build_dataset(concat, vocabulary_size)
print('vocab from size: %d'%(vocabulary_size))
print('Most common words', count[4:10])
print('Sample data', data[:10], [rev_dictionary[i] for i in data[:10]])
max_features = len(dictionary)
maxlen = 100
batch_size = 32
embedded_size = 256
train_X, test_X, train_Y, test_Y = train_test_split(texts,
labels,
test_size = 0.2)
class Model:
def __init__(
self, embedded_size, dict_size, dimension_output, learning_rate
):
self.X = tf.placeholder(tf.int32, [None, None])
self.Y = tf.placeholder(tf.int32, [None])
encoder_embeddings = tf.Variable(
tf.random_uniform([dict_size, embedded_size], -1, 1)
)
encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)
self.logits = tf.identity(
tf.layers.dense(
tf.reduce_mean(encoder_embedded, 1), dimension_output
),
name = 'logits',
)
self.cost = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits = self.logits, labels = self.Y
)
)
self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
self.cost
)
correct_pred = tf.equal(
tf.argmax(self.logits, 1, output_type = tf.int32), self.Y
)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Model(embedded_size, max_features, 2, 5e-4)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.trainable_variables())
saver.save(sess, 'fast-text/model.ckpt')
strings = ','.join(
[
n.name
for n in tf.get_default_graph().as_graph_def().node
if ('Variable' in n.op
or 'Placeholder' in n.name
or 'logits' in n.name)
and 'Adam' not in n.name
and 'beta' not in n.name
]
)
strings.split(',')
tf.trainable_variables()
from tqdm import tqdm
import time
EARLY_STOPPING, CURRENT_CHECKPOINT, CURRENT_ACC, EPOCH = 3, 0, 0, 0
while True:
lasttime = time.time()
if CURRENT_CHECKPOINT == EARLY_STOPPING:
print('break epoch:%d\n' % (EPOCH))
break
train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
pbar = tqdm(
range(0, len(train_X), batch_size), desc = 'train minibatch loop'
)
for i in pbar:
batch_x = str_idx(train_X[i : min(i + batch_size, len(train_X))], dictionary, maxlen)
batch_y = train_Y[i : min(i + batch_size, len(train_X))]
batch_x_expand = np.expand_dims(batch_x,axis = 1)
acc, cost, _ = sess.run(
[model.accuracy, model.cost, model.optimizer],
feed_dict = {
model.Y: batch_y,
model.X: batch_x
},
)
assert not np.isnan(cost)
train_loss += cost
train_acc += acc
pbar.set_postfix(cost = cost, accuracy = acc)
pbar = tqdm(range(0, len(test_X), batch_size), desc = 'test minibatch loop')
for i in pbar:
batch_x = str_idx(test_X[i : min(i + batch_size, len(test_X))], dictionary, maxlen)
batch_y = test_Y[i : min(i + batch_size, len(test_X))]
batch_x_expand = np.expand_dims(batch_x,axis = 1)
acc, cost = sess.run(
[model.accuracy, model.cost],
feed_dict = {
model.Y: batch_y,
model.X: batch_x
},
)
test_loss += cost
test_acc += acc
pbar.set_postfix(cost = cost, accuracy = acc)
train_loss /= len(train_X) / batch_size
train_acc /= len(train_X) / batch_size
test_loss /= len(test_X) / batch_size
test_acc /= len(test_X) / batch_size
if test_acc > CURRENT_ACC:
print(
'epoch: %d, pass acc: %f, current acc: %f'
% (EPOCH, CURRENT_ACC, test_acc)
)
CURRENT_ACC = test_acc
CURRENT_CHECKPOINT = 0
else:
CURRENT_CHECKPOINT += 1
print('time taken:', time.time() - lasttime)
print(
'epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'
% (EPOCH, train_loss, train_acc, test_loss, test_acc)
)
EPOCH += 1
real_Y, predict_Y = [], []
pbar = tqdm(
range(0, len(test_X), batch_size), desc = 'validation minibatch loop'
)
for i in pbar:
batch_x = str_idx(test_X[i : min(i + batch_size, len(test_X))], dictionary, maxlen)
batch_y = test_Y[i : min(i + batch_size, len(test_X))]
predict_Y += np.argmax(
sess.run(
model.logits, feed_dict = {model.X: batch_x, model.Y: batch_y}
),
1,
).tolist()
real_Y += batch_y
saver.save(sess, 'fast-text/model.ckpt')
print(
metrics.classification_report(
real_Y, predict_Y, target_names = ['negative', 'positive']
)
)
text = 'kerajaan sebenarnya sangat bencikan rakyatnya, minyak naik dan segalanya'
new_vector = str_idx([classification_textcleaning(text)], dictionary, len(text.split()))
sess.run(tf.nn.softmax(model.logits), feed_dict={model.X:new_vector})
import json
with open('fast-text-sentiment.json','w') as fopen:
fopen.write(json.dumps({'dictionary':dictionary,'reverse_dictionary':rev_dictionary}))
def freeze_graph(model_dir, output_node_names):
if not tf.gfile.Exists(model_dir):
raise AssertionError(
"Export directory doesn't exists. Please specify an export "
'directory: %s' % model_dir
)
checkpoint = tf.train.get_checkpoint_state(model_dir)
input_checkpoint = checkpoint.model_checkpoint_path
absolute_model_dir = '/'.join(input_checkpoint.split('/')[:-1])
output_graph = absolute_model_dir + '/frozen_model.pb'
clear_devices = True
with tf.Session(graph = tf.Graph()) as sess:
saver = tf.train.import_meta_graph(
input_checkpoint + '.meta', clear_devices = clear_devices
)
saver.restore(sess, input_checkpoint)
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess,
tf.get_default_graph().as_graph_def(),
output_node_names.split(','),
)
with tf.gfile.GFile(output_graph, 'wb') as f:
f.write(output_graph_def.SerializeToString())
print('%d ops in the final graph.' % len(output_graph_def.node))
freeze_graph('fast-text', strings)
def load_graph(frozen_graph_filename):
with tf.gfile.GFile(frozen_graph_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def)
return graph
g = load_graph('fast-text/frozen_model.pb')
x = g.get_tensor_by_name('import/Placeholder:0')
logits = g.get_tensor_by_name('import/logits:0')
test_sess = tf.InteractiveSession(graph = g)
test_sess.run(tf.nn.softmax(logits), feed_dict = {x: new_vector})
```
| github_jupyter |
```
import matplotlib.pyplot as plt
import numpy as np
import scipy.special
import copy
def empty_mask(size):
return np.zeros((size,size))
def circular_mask(size):
y,x = np.mgrid[:size, :size]
M = np.zeros((size,size))
x0 = y0 = (size-1)/2
r = size/4
M[(x-x0)**2+(y-y0)**2<=r**2]=1
return M
def rectangle_mask(size):
y,x = np.mgrid[:size, :size]
M = np.zeros((size,size))
x0 = y0 = (size-1)/2
r = size/4
M[((x-x0)**2<=r**2)*((y-y0)**2<=r**2)]=1
return M
def get_plane_wave(E0,k,size):
y,x = np.mgrid[:size, :size]
a = np.pi*0/180
E = E0*np.exp(-1j*k*(x*np.cos(a)+y*np.sin(a)))
return(E)
def get_greenfun(r,k):
return (1j/4)*scipy.special.hankel1(0,k*r)
def get_green_matrix(k,size):
j,i = np.mgrid[:size, :size]
ij_block = np.sqrt((i-1/2)**2+j**2)
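# note: the (i - 1/2) offset keeps the distance strictly positive on the diagonal,
# so get_greenfun never evaluates the Hankel function at r = 0, where it diverges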
green_mat = get_greenfun(ij_block,k)
return green_mat
# def get_toeplitz_mat(ij_block):
# ij_block = copy.deepcopy(ij_block)
# T = np.block([[ij_block,ij_block[:,:0:-1]],
# [ij_block[:0:-1,:],ij_block[:0:-1,:0:-1]]])
# return T
def get_toeplitz_mat(ij_block):
ij_block = copy.deepcopy(ij_block)
T1 = np.hstack((ij_block,ij_block[:,:0:-1]))
T2 = np.hstack((ij_block[:0:-1,:],ij_block[:0:-1,:0:-1]))
T = np.vstack((T1,T2))
return T
def G_matvec(vec,k):
size = int(np.sqrt(vec.shape[0]))
G_block = get_green_matrix(k,size)
G = get_toeplitz_mat(G_block)
mat = np.zeros((2*size-1,2*size-1),dtype = np.complex64)
mat_block = vec.reshape((-1,size))
mat[:size,:size] = mat_block
out_mat = np.fft.ifft2(np.fft.fft2(G)*np.fft.fft2(mat))
out = out_mat[:size,:size].reshape((-1,1))
return out
def get_eps_from_mask(e,mask):
return (e-1)*mask.reshape((-1,1))+1
def matvec(x,eps,k):
x = x.reshape((-1,1))
#print(x)
size = x.shape[0]
chi = k**2*(eps - 1)
return x-G_matvec(x*chi,k)
def old_matvec(x,mask,k,e):
eps = get_eps_from_mask(e,mask)
return matvec(x,eps,k)
def visualize(data, title="", cmap='jet'):
plt.title(title)
neg = plt.imshow(data, cmap=cmap, interpolation='none')
plt.colorbar(neg)
plt.show()
def solve(E,eps0,eps1):
return E
size = 16
e =1.5# 2.25
k = 2*np.pi/(size/1)
F = get_plane_wave(1,k,size)
#mask = empty_mask(size)
#mask = rectangle_mask(size)
mask = circular_mask(size)
eps = get_eps_from_mask(e,mask)
visualize(F.real,"Initial field (real part)")
visualize(mask,"Mask","gray")
import scipy.sparse.linalg as spla
import inspect
import time
x_last = get_plane_wave(1,k,size).reshape(-1,1)
def plot__solution_re_im_abs_mask(solution, size):
solution_re = solution.real.reshape(-1,size)
solution_im = solution.imag.reshape(-1,size)
solution_abs = np.abs(solution).reshape(-1,size)
solution_abs_mask = np.abs(solution).reshape(-1,size)*(1-mask)
visualize(solution_re,"Real")
visualize(solution_im,"Imag")
visualize(solution_abs,"Abs","gray")
visualize(solution_abs_mask,"Abs with mask")
return solution_re, solution_im, solution_abs, solution_abs_mask
def plot_relative_residuals_norms(t, residuals, relative_vector):
plt.semilogy(t, residuals/np.linalg.norm(relative_vector), 'x-', label="Generalized Minimal RESidual iterations")
plt.legend()
plt.title('Relative residual (depends on time), number of iterations = %i' % len(residuals))
plt.xlabel('Seconds')
plt.ylabel('Relative residual norm')
plt.show()
plt.semilogy(np.arange(len(residuals), 0, -1), residuals/np.linalg.norm(relative_vector), label="Generalized Minimal RESidual iterations")
plt.legend()
plt.title('Relative residual (depends on number of step), number of iterations = %i' % len(residuals))
plt.xlabel('Number of step')
plt.ylabel('Relative residual norm')
plt.show()
def gmres_solver(A, b, x0, maxiter, tol,
draw_graph_flag = False,
convergence_info = False,
display_convergence_info = False,
display_achieved_tolerance = False):
gmres_residuals_with_t = []
t0 = time.time()
solution, info = spla.gmres(A, b, x0=x0, maxiter = maxiter, tol = tol, restart = maxiter, callback = lambda x:
gmres_residuals_with_t.append([(inspect.currentframe().f_back).f_locals['resid'], time.time()])
)
if len(gmres_residuals_with_t)>1:
gmres_residuals_with_t = np.array(gmres_residuals_with_t).T
gmres_residuals_with_t[1] = gmres_residuals_with_t[1]-t0
gmres_t, gmres_residuals = gmres_residuals_with_t
else:
gmres_t, gmres_residuals = [],[]
if (display_convergence_info == True):
if (info == 0):
print("Status: Converged, successful exit")
else:
if (info > 0):
print("Status: Convergence to tolerance not achieved, number of iterations")
else:
print("Status: Illegal input or breakdown")
if ( draw_graph_flag == True ):
plot_relative_residuals_norms(gmres_t, gmres_residuals, b)
if ( display_achieved_tolerance == True):
print('Achieved tolerance = ', np.linalg.norm(A.dot(solution.reshape(-1,1))-b)/np.linalg.norm(b))
if (convergence_info == True):
return solution, info
return solution
def launch_solver(eps, k, x0 = None ,maxiter=300, tol = 1e-6):
global x_last
size = int(np.sqrt(eps.shape[0]))
A = spla.LinearOperator(shape = (size**2, size**2), matvec = lambda x: matvec(x,eps,k))
b = get_plane_wave(1,k,size).reshape(-1,1)
if x0 is None:
x0 = x_last
solution, info = gmres_solver(A, b, x0,
maxiter=maxiter,
tol=tol,
convergence_info = True)
x_last = solution.reshape(-1,1)
return solution, info
def show_residuals(eps, k, maxiter=300, tol = 1e-6):
size = int(np.sqrt(eps.shape[0]))
A = spla.LinearOperator(shape = (size**2, size**2), matvec = lambda x: matvec(x,eps,k))
b = get_plane_wave(1,k,size).reshape(-1,1)
x0 = np.ones(size**2).reshape(-1,1)
gmres_solver(A, b, x0,
maxiter=maxiter,
tol=tol,
draw_graph_flag = True)
t = time.time()
solution, info = launch_solver(eps=eps, k=k)
print(t-time.time())
show_residuals(eps=eps, k=k)
solution_re, solution_im, solution_abs, solution_abs_mask = plot__solution_re_im_abs_mask(solution, size)
def choose_direction(eps, k, maxiter=300, tol=1e-6, x=None):
if x is None:
x, info = launch_solver(eps=eps, k=k, maxiter=maxiter, tol=tol)
x_abs = np.abs(x)
x_max = np.max(x_abs)
indeces = np.argwhere( x_abs == x_max )
choose_direction = np.zeros(x.shape[0], dtype = np.complex64)
choose_direction[indeces] = (np.sign(x.real)/2+1j*np.sign(x.imag)/2)[indeces]/indeces.shape[0]
return choose_direction
def get_Jacobi_diagonal(mask, e, k, eps = None, x0 = None , maxiter=300, tol = 1e-6):
if eps is None:
eps = get_eps_from_mask(e,mask)
solution, info = launch_solver(eps=eps, x0=x0, k=k, maxiter=maxiter, tol = tol)
solution_with_coeff = k**2*(e-1)*solution
zero_vector = np.zeros(solution_with_coeff.shape[0], dtype = np.complex64)
Jacobi_diagonal = np.zeros(solution.shape[0], dtype = np.complex64 )
for i in range(solution.shape[0]):
solution_sparse_column = zero_vector.copy()
solution_sparse_column[i] = solution_with_coeff[i]
A = spla.LinearOperator(shape = (size**2, size**2), matvec = lambda x: matvec(x,eps,k))
b = G_matvec(solution_sparse_column, k)
Jacobi_diagonal[i] = gmres_solver(A=A, b=b, x0=solution, maxiter=maxiter, tol=tol)[i]
return Jacobi_diagonal
def get_grad(mask, e=e, k=k, x = None, eps = None, x0 = None , maxiter=300, tol = 1e-6):
if eps is None:
eps = get_eps_from_mask(e,mask)
solution, info = launch_solver(eps=eps, x0=x0, k=k, maxiter=maxiter, tol = tol)
direction = choose_direction(eps=eps, k=k, maxiter=maxiter, tol=tol, x=solution)
solution_with_coeff = k**2*(e-1)*solution
zero_vector = np.zeros(solution_with_coeff.shape[0], dtype = np.complex64)
Jacobi_diagonal = np.zeros(solution.shape[0], dtype = np.complex64 )
for i in np.argwhere(direction!=0):
solution_sparse_column = zero_vector.copy()
solution_sparse_column[i] = solution_with_coeff[i]
A = spla.LinearOperator(shape = (size**2, size**2), matvec = lambda x: matvec(x,eps,k))
b = G_matvec(solution_sparse_column, k)
Jacobi_diagonal[i] = gmres_solver(A=A, b=b, x0=solution, maxiter=maxiter, tol=tol)[i]
return np.abs(Jacobi_diagonal)
print(get_grad(mask, e, k, maxiter=300, tol = 1e-6))
from scipy.optimize import minimize
def plot_solution(y):
mask = get_fild_value(y,20)
print(np.min(mask))
print(np.max(mask))
eps = get_eps_from_mask(e,mask).reshape((-1,1))
print(np.min(eps))
print(np.max(eps))
field, info = launch_solver(eps=eps, k=k)
visualize(mask,"Mask","gray")
#visualize(field.real.reshape(-1,size),"Field (Real part)")
visualize(np.abs(field).reshape(-1,size),"Field (Abs)")
print(objective(y))
print(np.max(np.abs(field)))
i=0
def get_fild_value(y,p):
x = (np.tanh(p*y)+1)/2
return x
def callback(x):
global i
i+=1
print(i)
def penalty(x,p):
return np.sum(1-x**p-(1-x)**p)
#return np.sum(x*(1-x))
#obj = 0
def objective(y):
mask = get_fild_value(y,4)
eps = get_eps_from_mask(e,mask).reshape((-1,1))
field, info = launch_solver(eps=eps, k=k)
#global obj
mask = get_fild_value(y,20)
eps = get_eps_from_mask(e,mask).reshape((-1,1))
field, info = launch_solver(eps=eps, k=k)
if info !=0:
raise RuntimeError()
obj = -np.max(np.abs(field))#+penalty(mask,20)*1
#print(obj)
return obj
# x_empty_ind = np.argwhere((-0.1<mask)*(mask<0.1))
# x_empty = x[x_empty_ind]
# x_empty = x
# if info != 0:
# raise RuntimeError()
# if x_empty.shape[0]!=0:
# #print(np.max(x_empty.imag))
# obj = -np.max(np.abs(x_empty))+penalty(mask,20)*0.001
# else:
# obj = penalty(mask,20)*0.001
# #print(obj)
# return obj
def get_random_mask(size):
mask = np.random.rand(size,size)
return mask
# def search_with_restarts(num):
#y = np.random.random(size,size)
# mask =circular_mask(size)
noize = (get_random_mask(size)-0.5)*10
# mask = (mask + noize)/np.max(noize+0.001)
y = circular_mask(size)-0.5+noize
obj0 = objective(y)
mask = get_fild_value(y,20)
plot_solution(y)
#bns = tuple((0,1) for _ in range(size**2))
sol = minimize(objective,y,method = "BFGS",options={'maxiter': 10, 'gtol':1e-9}, callback = callback)
best_y = sol.x.reshape(-1,size)
plot_solution(best_y)
print(obj0)
# import cvxpy as cvx
# size = 2
# k = 2*np.pi/(size/7)
# F = get_plane_wave(1,k,size)
# x = cvx.Variable(size**2)
# eps = cvx.Variable(size**2)
# y = cvx.Variable(1)
# # lambda val: matvec2(val,eps,k,e
# obj = cvx.Maximize(y)
# #A = spla.LinearOperator(shape = (size**2, size**2), matvec = lambda val: val)
# #print(A.dot([1,1,0,0]))
# costrs = [x>F.reshape(-1,1),y>=x]
# prob = cvx.Problem(obj,costrs)
# prob.solve()
# print(prob.value)
```
| github_jupyter |
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# Challenge Notebook
## Problem: Given an array of (unix_timestamp, num_people, EventType.ENTER or EventType.EXIT), find the busiest period.
* [Constraints](#Constraints)
* [Test Cases](#Test-Cases)
* [Algorithm](#Algorithm)
* [Code](#Code)
* [Unit Test](#Unit-Test)
* [Solution Notebook](#Solution-Notebook)
## Constraints
* Can we assume the input array is valid?
* Check for None
* Can we assume the elements of the input array are valid?
* Yes
* Is the input sorted by time?
* No
* Can you have enter and exit elements for the same timestamp?
* Yes you can, order of enter and exit is not guaranteed
* Could we have multiple enter events (or multiple exit events) for the same timestamp?
* No
* What is the format of the output?
* An array of timestamps [t1, t2]
* Can we assume the starting number of people is zero?
* Yes
* Can we assume the inputs are valid?
* No
* Can we assume this fits memory?
* Yes
## Test Cases
* None -> TypeError
* [] -> None
* General case
<pre>
timestamp num_people event_type
1 2 EventType.ENTER
3 1 EventType.ENTER
3 2 EventType.EXIT
7 3 EventType.ENTER
8 2 EventType.EXIT
9 2 EventType.EXIT
result = Period(7, 8)
</pre>
The running count of people is 2 at timestamp 1, drops to 1 after the enter/exit pair at timestamp 3, peaks at 4 between timestamps 7 and 8, and then falls back to 0, so the busiest period is Period(7, 8).
## Algorithm
Refer to the [Solution Notebook](). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
## Code
```
from enum import Enum
class Data(object):
def __init__(self, timestamp, num_people, event_type):
self.timestamp = timestamp
self.num_people = num_people
self.event_type = event_type
def __lt__(self, other):
return self.timestamp < other.timestamp
class Period(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __eq__(self, other):
return self.start == other.start and self.end == other.end
def __repr__(self):
return str(self.start) + ', ' + str(self.end)
class EventType(Enum):
ENTER = 0
EXIT = 1
class Solution(object):
def find_busiest_period(self, data):
# TODO: Implement me
pass
```
## Unit Test
**The following unit test is expected to fail until you solve the challenge.**
```
# %load test_find_busiest_period.py
import unittest
class TestSolution(unittest.TestCase):
def test_find_busiest_period(self):
solution = Solution()
self.assertRaises(TypeError, solution.find_busiest_period, None)
self.assertEqual(solution.find_busiest_period([]), None)
data = [
Data(3, 2, EventType.EXIT),
Data(1, 2, EventType.ENTER),
Data(3, 1, EventType.ENTER),
Data(7, 3, EventType.ENTER),
Data(9, 2, EventType.EXIT),
Data(8, 2, EventType.EXIT),
]
self.assertEqual(solution.find_busiest_period(data), Period(7, 8))
print('Success: test_find_busiest_period')
def main():
test = TestSolution()
test.test_find_busiest_period()
if __name__ == '__main__':
main()
```
## Solution Notebook
Review the [Solution Notebook]() for a discussion on algorithms and code solutions.
| github_jupyter |
<a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a>
$ \newcommand{\bra}[1]{\langle #1|} $
$ \newcommand{\ket}[1]{|#1\rangle} $
$ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
$ \newcommand{\dot}[2]{ #1 \cdot #2} $
$ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
$ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
$ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
$ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
$ \newcommand{\mypar}[1]{\left( #1 \right)} $
$ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
$ \newcommand{\onehalf}{\frac{1}{2}} $
$ \newcommand{\donehalf}{\dfrac{1}{2}} $
$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
$ \newcommand{\vzero}{\myvector{1\\0}} $
$ \newcommand{\vone}{\myvector{0\\1}} $
$ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $
$ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
$ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
$ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $
$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
$ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
$ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $
$ \newcommand{\greenbit}[1] {\mathbf{{\color{green}#1}}} $
$ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}#1}}} $
$ \newcommand{\redbit}[1] {\mathbf{{\color{red}#1}}} $
$ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}#1}}} $
$ \newcommand{\blackbit}[1] {\mathbf{{\color{black}#1}}} $
<font style="font-size:28px;" align="left"><b>Probabilistic States </b></font>
<br>
_prepared by Abuzer Yakaryilmaz_
<br><br>
[<img src="../qworld/images/watch_lecture.jpg" align="left">](https://youtu.be/tJjrF7WgT1g)
<br><br><br>
Suppose that Asja tosses a fair coin secretly.
As we do not see the result, our information about the outcome will be probabilistic:
$\rightarrow$ The outcome will be heads with probability $0.5$ and tails with probability $0.5$.
If the coin has a bias $ \dfrac{Pr(Head)}{Pr(Tail)} = \dfrac{3}{1}$, then our information about the outcome will be as follows:
$\rightarrow$ The outcome will be heads with probability $ 0.75 $ and the outcome will be tails with probability $ 0.25 $.
<i><u>Explanation</u>: The probability of getting heads is three times the probability of getting tails.
<ul>
<li>The total probability is 1. </li>
<li> We divide the whole probability 1 into four parts (three parts are for heads and one part is for tails),</li>
<li> one part is $ \dfrac{1}{4} = 0.25$,</li>
<li> and then give three parts for heads ($0.75$) and one part for tails ($0.25$).</li>
</ul></i>
<h3> Listing probabilities as a column </h3>
We have two different outcomes: heads (0) and tails (1).
We use a column of size 2 to show the probabilities of getting heads and getting tails.
For the fair coin, our information after the coin-flip will be $ \myvector{0.5 \\ 0.5} $.
For the biased coin, it will be $ \myvector{0.75 \\ 0.25} $.
The first entry shows the probability of getting heads, and the second entry shows the probability of getting tails.
$ \myvector{0.5 \\ 0.5} $ and $ \myvector{0.75 \\ 0.25} $ are two examples of 2-dimensional (column) vectors.
<h3> Task 1 </h3>
Suppose that Balvis secretly flips a coin having the bias $ \dfrac{Pr(Heads)}{Pr(Tails)} = \dfrac{1}{4}$.
Represent your information about the outcome as a column vector.
<h3> Task 2 </h3>
Suppose that Fyodor secretly rolls a loaded (tricky) dice with the bias
$$ Pr(1):Pr(2):Pr(3):Pr(4):Pr(5):Pr(6) = 7:5:4:2:6:1 . $$
Represent your information about the result as a column vector. Remark that the size of your column vector should be 6.
You may use python for your calculations.
```
#
# your code is here
#
```
<a href="CS16_Probabilistic_States_Solutions.ipynb#task2">click for our solution</a>
<h3> Vector representation </h3>
Suppose that we have a system with 4 distinguishable states: $ s_1 $, $s_2 $, $s_3$, and $s_4$.
We expect the system to be in one of them at any moment.
By speaking with probabilities, we say that the system is in one of the states with probability 1, and in any other state with probability 0.
By using our column representation, we can show each state as a column vector (by using the vectors in standard basis of $ \mathbb{R}^4 $):
$
e_1 = \myvector{1\\ 0 \\ 0 \\ 0}, e_2 = \myvector{0 \\ 1 \\ 0 \\ 0}, e_3 = \myvector{0 \\ 0 \\ 1 \\ 0},
\mbox{ and } e_4 = \myvector{0 \\ 0 \\ 0 \\ 1}.
$
This representation helps us to represent our information on a system when it is in more than one state with certain probabilities.
Remember the case in which the coins are tossed secretly.
For example, suppose that the system is in states $ s_1 $, $ s_2 $, $ s_3 $, and $ s_4 $ with probabilities $ 0.20 $, $ 0.25 $, $ 0.40 $, and $ 0.15 $, respectively.
(<i>The total probability must be 1, i.e., $ 0.20+0.25+0.40+0.15 = 1.00 $</i>)
Then, we can say that the system is in the following probabilistic state:
$ 0.20 \cdot e_1 + 0.25 \cdot e_2 + 0.40 \cdot e_3 + 0.15 \cdot e_4 $
$ = 0.20 \cdot \myvector{1\\ 0 \\ 0 \\ 0} + 0.25 \cdot \myvector{0\\ 1 \\ 0 \\ 0} + 0.40 \cdot \myvector{0\\ 0 \\ 1 \\ 0} + 0.15 \cdot \myvector{0\\ 0 \\ 0 \\ 1} $
$ = \myvector{0.20\\ 0 \\ 0 \\ 0} + \myvector{0\\ 0.25 \\ 0 \\ 0} + \myvector{0\\ 0 \\0.40 \\ 0} + \myvector{0\\ 0 \\ 0 \\ 0.15 } = \myvector{ 0.20 \\ 0.25 \\ 0.40 \\ 0.15 }, $
where the summation of entries must be 1.
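As a quick sanity check, this linear combination can be reproduced with a few lines of Python (assuming numpy is available):
```
import numpy as np

# standard basis vectors of R^4 (rows of the identity matrix)
e1, e2, e3, e4 = np.eye(4)

# the probabilistic state as a linear combination of the basis vectors
v = 0.20 * e1 + 0.25 * e2 + 0.40 * e3 + 0.15 * e4

print(v)                       # [0.2  0.25 0.4  0.15]
print(np.isclose(v.sum(), 1))  # True -> the entries sum to 1
print((v >= 0).all())          # True -> every entry is non-negative
```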
<h3> Probabilistic state </h3>
A probabilistic state is a linear combination of the vectors in the standard basis.
Here coefficients (scalars) must satisfy certain properties:
<ol>
<li> Each coefficient is non-negative </li>
<li> The summation of coefficients is 1 </li>
</ol>
Alternatively, we can say that a probabilistic state is a probability distribution over deterministic states.
We can show all information as a single mathematical object, which is called a stochastic vector.
<i> Remark that the state of any linear system is a linear combination of the vectors in the basis. </i>
<h3> Task 3 </h3>
For a system with 4 states, randomly create a probabilistic state, and print its entries, e.g., $ 0.16~~0.17~~0.02~~0.65 $.
<i>Hint: You may pick your random numbers between 0 and 100 (or 1000), and then normalize each value by dividing it by the sum of all the numbers.</i>
```
#
# your solution is here
#
```
<a href="CS16_Probabilistic_States_Solutions.ipynb#task3">click for our solution</a>
<h3> Task 4 [extra] </h3>
As given in the hint for Task 3, you may pick your random numbers between 0 and $ 10^k $. For better precision, you may take bigger values of $ k $.
Write a function that randomly creates a probabilistic state of size $ n $ with a precision up to $ k $ digits.
Test your function.
```
#
# your solution is here
#
```
| github_jupyter |
# Datasets and Neural Networks
This notebook will step through the process of loading an arbitrary dataset in PyTorch, and creating a simple neural network for regression.
# Datasets
We will first work through loading an arbitrary dataset in PyTorch. For this project, we chose the <a href="http://www.cs.toronto.edu/~delve/data/abalone/desc.html">delve abalone dataset</a>.
First, download and unzip the dataset from the link above, then unzip `Dataset.data.gz` and move `Dataset.data` into `hackpack-ml/models/data`.
We are given the following attribute information in the spec:
```
Attributes:
1 sex u M F I # Gender or Infant (I)
2 length u (0,Inf] # Longest shell measurement (mm)
3 diameter u (0,Inf] # perpendicular to length (mm)
4 height u (0,Inf] # with meat in shell (mm)
5 whole_weight u (0,Inf] # whole abalone (gr)
6 shucked_weight u (0,Inf] # weight of meat (gr)
7 viscera_weight u (0,Inf] # gut weight (after bleeding) (gr)
8 shell_weight u (0,Inf] # after being dried (gr)
9 rings u 0..29 # +1.5 gives the age in years
```
```
import math
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
import torch.nn.functional as F
import pandas as pd
from torch.utils.data import Dataset, DataLoader
```
Pandas is a data manipulation library that works really well with structured data. We can use Pandas DataFrames to load the dataset.
```
col_names = ['sex', 'length', 'diameter', 'height', 'whole_weight',
'shucked_weight', 'viscera_weight', 'shell_weight', 'rings']
abalone_df = pd.read_csv('../data/Dataset.data', sep=' ', names=col_names)
abalone_df.head(n=3)
```
We define a subclass of PyTorch Dataset for our Abalone dataset.
```
class AbaloneDataset(data.Dataset):
"""Abalone dataset. Provides quick iteration over rows of data."""
def __init__(self, csv):
"""
Args: csv (string): Path to the Abalone dataset.
"""
self.features = ['sex', 'length', 'diameter', 'height', 'whole_weight',
'shucked_weight', 'viscera_weight', 'shell_weight']
self.y = ['rings']
self.abalone_df = pd.read_csv(csv, sep=' ', names=(self.features + self.y))
# Turn categorical data into machine interpretable format (one hot)
self.abalone_df['sex'] = pd.get_dummies(self.abalone_df['sex'])
def __len__(self):
return len(self.abalone_df)
def __getitem__(self, idx):
"""Return (x,y) pair where x are abalone features and y is age."""
features = self.abalone_df.iloc[idx][self.features].values
y = self.abalone_df.iloc[idx][self.y]
return torch.Tensor(features).float(), torch.Tensor(y).float()
```
# Neural Networks
The task is to predict the age (number of rings) of abalone from physical measurements. We build a simple neural network with one hidden layer to model the regression.
```
class Net(nn.Module):
def __init__(self, feature_size):
super(Net, self).__init__()
# feature_size (8) input features, 1 output value
self.fc1 = nn.Linear(feature_size, 4)
self.fc2 = nn.Linear(4, 1)
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
```
We instantiate an Abalone dataset instance and create DataLoaders for train and test sets.
```
dataset = AbaloneDataset('../data/Dataset.data')
train_split, test_split = math.floor(len(dataset) * 0.8), math.ceil(len(dataset) * 0.2)
trainset = [dataset[i] for i in range(train_split)]
testset = [dataset[train_split + j] for j in range(test_split)]
batch_sz = len(trainset) # Compact data allows for big batch size
trainloader = data.DataLoader(trainset, batch_size=batch_sz, shuffle=True, num_workers=4)
testloader = data.DataLoader(testset, batch_size=batch_sz, shuffle=False, num_workers=4)
```
Now, we can initialize our network and define train and test functions
```
net = Net(len(dataset.features))
loss_fn = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=0.1)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
gpu_ids = [0] # On Colab, we have access to one GPU. Change this value as you see fit
def train(epoch):
"""
Trains our net on data from the trainloader for a single epoch
"""
net.train()
with tqdm(total=len(trainloader.dataset)) as progress_bar:
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad() # Clear any stored gradients for new step
outputs = net(inputs.float())
loss = loss_fn(outputs, targets) # Calculate loss between prediction and label
loss.backward() # Backpropagate gradient updates through net based on loss
optimizer.step() # Update net weights based on gradients
progress_bar.set_postfix(loss=loss.item())
progress_bar.update(inputs.size(0))
def test(epoch):
"""
Run net in inference mode on test data.
"""
net.eval()
# Ensures the net will not update weights
with torch.no_grad():
with tqdm(total=len(testloader.dataset)) as progress_bar:
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device).float(), targets.to(device).float()
outputs = net(inputs)
loss = loss_fn(outputs, targets)
progress_bar.set_postfix(testloss=loss.item())
progress_bar.update(inputs.size(0))
```
Now that everything is prepared, it's time to train!
```
test_freq = 5 # Frequency to run model on validation data
for epoch in range(0, 200):
train(epoch)
if epoch % test_freq == 0:
test(epoch)
```
We use the network's eval mode to do a sample prediction to see how well it does.
```
net.eval()
sample = testset[0]
predicted_age = net(sample[0])
true_age = sample[1]
print(f'Input features: {sample[0]}')
print(f'Predicted age: {predicted_age.item()}, True age: {true_age[0]}')
```
Congratulations! You now know how to load your own datasets into PyTorch and run models on it. For an example of Computer Vision, check out the DenseNet notebook. Happy hacking!
| github_jupyter |
# Optimization with equality constraints
```
import math
import numpy as np
from scipy import optimize as opt
```
maximize $.4\,\log(x_1)+.6\,\log(x_2)$ s.t. $x_1+3\,x_2=50$.
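Before handing the problem to the solver, note that this log (Cobb-Douglas) utility has a closed-form solution that the numerical output should reproduce: the first-order conditions $0.4/x_1 = \lambda$ and $0.6/x_2 = 3\lambda$, combined with the budget $x_1 + 3x_2 = 50$, give $\lambda = 1/50 = 0.02$, $x_1 = 20$, and $x_2 = 10$, so the optimal value is $V = 0.4\log 20 + 0.6\log 10 \approx 2.58$. The finite-difference estimate of $\partial V/\partial I$ computed below should therefore be close to $0.02$, and the Roy's identity ratio at the end of this block should be close to $x_1 = 20$.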
```
I = 50
p = np.array([1, 3])
U = lambda x: (.4*math.log(x[0])+.6*math.log(x[1]))
x0 = (I/len(p))/np.array(p)
budget = ({'type': 'eq', 'fun': lambda x: I-np.sum(np.multiply(x, p))})
opt.minimize(lambda x: -U(x), x0, method='SLSQP', constraints=budget, tol=1e-08,
options={'disp': True, 'ftol': 1e-08})
def consumer(U, p, I):
budget = ({'type': 'eq', 'fun': lambda x: I-np.sum(np.multiply(x, p))})
x0 = (I/len(p))/np.array(p)
sol = opt.minimize(lambda x: -U(x), x0, method='SLSQP', constraints=budget, tol=1e-08,
options={'disp': False, 'ftol': 1e-08})
if sol.status == 0:
return {'x': sol.x, 'V': -sol.fun, 'MgU': -sol.jac, 'mult': -sol.jac[0]/p[0]}
else:
return 0
consumer(U, p, I)
delta=.01
(consumer(U, p, I+delta)['V']-consumer(U, p, I-delta)['V'])/(2*delta)
delta=.001
numerador = (consumer(U,p+np.array([delta, 0]), I)['V']-consumer(U,p+np.array([-delta, 0]), I)['V'])/(2*delta)
denominador = (consumer(U, p, I+delta)['V']-consumer(U, p, I-delta)['V'])/(2*delta)
-numerador/denominador
```
## Cost function
```
# Production function
F = lambda x: (x[0]**.8)*(x[1]**.2)
w = np.array([5, 4])
y = 1
constraint = ({'type': 'eq', 'fun': lambda x: y-F(x)})
x0 = np.array([.5, .5])
cost = opt.minimize(lambda x: w@x, x0, method='SLSQP', constraints=constraint, tol=1e-08,
options={'disp': True, 'ftol': 1e-08})
F(cost.x)
cost
```
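As an analytic cross-check, minimising $5x_1 + 4x_2$ subject to $x_1^{0.8}x_2^{0.2} = 1$ requires the tangency condition $\frac{0.8}{0.2}\frac{x_2}{x_1} = \frac{5}{4}$, i.e. $x_2 = \frac{5}{16}x_1$; substituting into the constraint gives $x_1 = 3.2^{0.2} \approx 1.26$ and $x_2 \approx 0.39$, for a minimal cost of about $7.89$. The SLSQP solution above should match these values up to numerical tolerance.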
## Exercise
```
a = 2
u = lambda c: -np.exp(-a*c)
R = 2
Z2 = np.array([.72, .92, 1.12, 1.32])
Z3 = np.array([.86, .96, 1.06, 1.16])
def U(x):
states = len(Z2)*len(Z3)
U = u(x[0])
for z2 in Z2:
for z3 in Z3:
U += (1/states)*u(x[1]*R+x[2]*z2+x[3]*z3)
return U
p = np.array([1, 1, .5, .5])
I = 4
# a=1
consumer(U, p, I)
# a=5
consumer(U, p, I)
# a=2
consumer(U, p, I)
import matplotlib.pyplot as plt
x = np.arange(0.0, 2.0, 0.01)
a = 2
u = lambda c: -np.exp(-a*c)
plt.plot(x, u(x))
a = -2
plt.plot(x, u(x))
```
# Optimization with inequality constraints
```
f = lambda x: -x[0]**3+x[1]**2-2*x[0]*(x[2]**2)
constraints =({'type': 'eq', 'fun': lambda x: 2*x[0]+x[1]**2+x[2]-5},
{'type': 'ineq', 'fun': lambda x: 5*x[0]**2-x[1]**2-x[2]-2})
constraints =({'type': 'eq', 'fun': lambda x: x[0]**3-x[1]})
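# NOTE: this second assignment replaces the constraint set defined just above,
# so only the equality constraint x[0]**3 - x[1] == 0 is passed to the solver below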
x0 = np.array([.5, .5, 2])
opt.minimize(f, x0, method='SLSQP', constraints=constraints, tol=1e-08,
options={'disp': True, 'ftol': 1e-08})
```
| github_jupyter |
```
import sys
sys.path.append('../')
%load_ext autoreload
%autoreload 2
import sklearn
import copy
import numpy as np
import seaborn as sns
sns.set()
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
# from viz import viz
from bokeh.plotting import figure, show, output_notebook, output_file, save
from functions import merge_data
from sklearn.model_selection import RandomizedSearchCV
import load_data
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from fit_and_predict import fit_and_predict
```
## Params:
```
aggregate_by_state = False
outcome_type = 'cases'
```
## Basic Data Visualization
```
# Just something to quickly summarize the number of cases and distributions each day
# 'deaths' and 'cases' contain the time-series of the outbreak
df = load_data.load_county_level(data_dir = '../data/')
df = df.sort_values('#Deaths_3/30/2020', ascending=False)
# outcome_cases = load_data.outcome_cases # most recent day
# outcome_deaths = load_data.outcome_deaths
important_vars = load_data.important_keys(df)
very_important_vars = ['PopulationDensityperSqMile2010',
# 'MedicareEnrollment,AgedTot2017',
'PopulationEstimate2018',
'#ICU_beds',
'MedianAge2010',
'Smokers_Percentage',
'DiabetesPercentage',
'HeartDiseaseMortality',
'#Hospitals'
# 'PopMale60-642010',
# 'PopFmle60-642010',
# 'PopMale65-742010',
# 'PopFmle65-742010',
# 'PopMale75-842010',
# 'PopFmle75-842010',
# 'PopMale>842010',
# 'PopFmle>842010'
]
def sum_lists(list_of_lists):
arr = np.array(list(list_of_lists))
sum_arr = np.sum(arr,0)
return list(sum_arr)
if aggregate_by_state:
# Aggregate by State
state_deaths_df = df.groupby('StateNameAbbreviation').deaths.agg(sum_lists).to_frame()
state_cases_df = df.groupby('StateNameAbbreviation').cases.agg(sum_lists).to_frame()
df = pd.concat([state_cases_df,state_deaths_df],axis =1 )
# Distribution of the maximum number of cases
_cases = list(df['cases'])
max_cases = []
for i in range(len(df)):
max_cases.append(max(_cases[i]))
print('Number of counties with non-zero cases')
print(sum([v >0 for v in max_cases]))
# cases truncated below 20 and above 1000 for plot readability
plt.hist([v for v in max_cases if v > 20 and v < 1000],bins = 100)
sum(max_cases)
print(sum([v > 50 for v in max_cases]))
np.quantile(max_cases,.5)
# Distribution of the maximum number of cases
_deaths = list(df['deaths'])
max_deaths = []
for i in range(len(df)):
max_deaths.append(max(_deaths[i]))
print('Number of counties with non-zero deaths')
print(sum([v > 0 for v in max_deaths]))
# plt.hist(max_cases)
# print(sum([v >0 for v in max_cases]))
plt.hist([v for v in max_deaths if v > 5],bins=30)
sum(max_deaths)
max(max_deaths)
np.quantile(max_deaths,.7)
```
### Clean data
```
# Remove counties with zero cases
max_cases = [max(v) for v in df['cases']]
df['max_cases'] = max_cases
max_deaths = [max(v) for v in df['deaths']]
df['max_deaths'] = max_deaths
df = df[df['max_cases'] > 0]
```
## Predict data from model:
```
method_keys = []
# clear predictions
for m in method_keys:
del df[m]
# target_day = np.array([1])
# # Trains model on train_df and produces predictions for the final day for test_df and writes prediction
# # to a new column for test_df
# # fit_and_predict(df, method='exponential', outcome=outcome_type, mode='eval_mode',target_day=target_day)
# # fit_and_predict(df,method='shared_exponential', outcome=outcome_type, mode='eval_mode',target_day=target_day)
# # fit_and_predict(train_df, test_df,'shared_exponential', mode='eval_mode',demographic_vars=important_vars)
# # fit_and_predict(df,method='shared_exponential', outcome=outcome_type, mode='eval_mode',demographic_vars=very_important_vars,target_day=target_day)
# fit_and_predict(df, outcome=outcome_type, mode='eval_mode',demographic_vars=[],
# method='ensemble',target_day=target_day)
# fit_and_predict(df, outcome=outcome_type, mode='eval_mode',demographic_vars=[],
# method='ensemble',target_day=np.array([1,2,3]))
# # fit_and_predict(train_df, test_d f,method='exponential',mode='eval_mode',target_day = np.array([1,2]))
# # Finds the names of all the methods
# method_keys = [c for c in df if 'predicted' in c]
# method_keys
# for days_ahead in [1, 2, 3]:
# for method in ['exponential', 'shared_exponential', 'ensemble']:
# fit_and_predict(df, method=method, outcome=outcome_type, mode='eval_mode',target_day=np.array([days_ahead]))
# if method == 'shared_exponential':
# fit_and_predict(df,method='shared_exponential',
# outcome=outcome_type,
# mode='eval_mode',
# demographic_vars=very_important_vars,
# target_day=np.array([days_ahead]))
# method_keys = [c for c in df if 'predicted' in c]
# geo = ['countyFIPS', 'CountyNamew/StateAbbrev']
# method_keys = [c for c in df if 'predicted' in c]
# df_preds = df[method_keys + geo + ['deaths']]
# df_preds.to_pickle("multi_day_6.pkl")
```
## Ensemble predictions
```
exponential = {'model_type':'exponential'}
shared_exponential = {'model_type':'shared_exponential'}
demographics = {'model_type':'shared_exponential', 'demographic_vars':very_important_vars}
linear = {'model_type':'linear'}
# import fit_and_predict
# for d in [1, 2, 3]:
# df = fit_and_predict.fit_and_predict_ensemble(df,
# target_day=np.array([d]),
# mode='eval_mode',
# outcome=outcome_type,
# output_key=f'predicted_{outcome_type}_ensemble_{d}'
# )
import fit_and_predict
for d in [1, 3, 5, 7]:
df = fit_and_predict.fit_and_predict_ensemble(df,
target_day=np.array(range(1, d+1)),
mode='eval_mode',
outcome=outcome_type,
methods=[exponential,
shared_exponential,
demographics,
linear
],
output_key=f'predicted_{outcome_type}_ensemble_{d}_with_exponential'
)
method_keys = [c for c in df if 'predicted' in c]
# df = fit_and_predict.fit_and_predict_ensemble(df)
method_keys
```
## Evaluate and visualize models
### Compute MSE and log MSE on relevant cases
```
# TODO: add average rank as metric
# Computes the mse in log space and non-log space for all columns
def l1(arr1,arr2,norm=True):
"""
arr2 ground truth
arr1 predictions
"""
if norm:
sum_percent_dif = 0
for i in range(len(arr1)):
sum_percent_dif += np.abs(arr2[i]-arr1[i])/arr1[i]
return sum_percent_dif/len(arr1)
return sum([np.abs(a1-a2) for (a1,a2) in zip(arr1,arr2)])/len(arr1)
mse = sklearn.metrics.mean_squared_error
# Only evaluate counties where the outcome exceeds this threshold
# lower_threshold, upper_threshold = 10, 100000
lower_threshold, upper_threshold = 10, np.inf
# Log scaled
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [np.log(p[-1] + 1) for p in df[key][(outcome > lower_threshold)]] # * (outcome < upper_threshold)]]
print('Log scale MSE for '+key)
print(mse(np.log(outcome[(outcome > lower_threshold) * (outcome < upper_threshold)] + 1),preds))
# Log scaled
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [np.log(p[-1] + 1) for p in df[key][outcome > lower_threshold]]
print('Log scale l1 for '+key)
print(l1(np.log(outcome[outcome > lower_threshold] + 1),preds))
# No log scale
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [p[-1] for p in df[key][outcome > lower_threshold]]
print('Raw MSE for '+key)
print(mse(outcome[outcome > lower_threshold],preds))
# No log scale
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [p[-1] for p in df[key][outcome > lower_threshold]]
print('Relative l1 for '+key)
print(l1(outcome[outcome > lower_threshold],preds))
# No log scale
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [p[-1] for p in df[key][outcome > lower_threshold]]
print('Raw l1 for '+key)
print(l1(outcome[outcome > lower_threshold],preds,norm=False))
```
### Plot residuals
```
# TODO: Create bounds automatically, create a plot function and call it instead of copying code, figure out way
# to plot more than two things at once cleanly
# Creates residual plots log scaled and raw
# We only look at cases with number of deaths greater than 5
def method_name_to_pretty_name(key):
# TODO: hacky, fix
words = key.split('_')
words2 = []
for w in words:
if not w.isnumeric():
words2.append(w)
else:
num = w
model_name = ' '.join(words2[2:])
# model_name = 'model'
if num == '1':
model_name += ' predicting 1 day ahead'
else:
model_name += ' predicting ' +w+' days ahead'
return model_name
# Make log plots:
bounds = [1.5, 7]
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [np.log(p[-1]) for p in df[key][outcome > 5]]
plt.scatter(np.log(outcome[outcome > 5]),preds,label=method_name_to_pretty_name(key))
plt.xlabel('actual '+outcome_type)
plt.ylabel('predicted '+outcome_type)
plt.xlim(bounds)
plt.ylim(bounds)
plt.legend()
plt.plot(bounds, bounds, ls="--", c=".3")
plt.show()
# Make log plots zoomed in for the counties that have a fewer number of deaths
bounds = [1.5, 4]
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [np.log(p[-1]) for p in df[key][outcome > 5]]
plt.scatter(np.log(outcome[outcome > 5]),preds,label=method_name_to_pretty_name(key))
plt.xlabel('actual '+outcome_type)
plt.ylabel('predicted '+outcome_type)
plt.xlim(bounds)
plt.ylim(bounds)
plt.legend()
plt.plot(bounds, bounds, ls="--", c=".3")
plt.show()
# Make non-log plots zoomed in for the counties that have a fewer number of deaths# We set bounds
bounds = [10,400]
outcome = np.array([df[outcome_type].values[i][-1] for i in range(len(df))])
for key in method_keys:
preds = [p[-1] for p in df[key][outcome > 5]]
plt.scatter(outcome[outcome > 5],preds,label=method_name_to_pretty_name(key))
plt.xlabel('actual '+outcome_type)
plt.ylabel('predicted '+outcome_type)
plt.xlim(bounds)
plt.ylim(bounds)
plt.legend()
plt.plot(bounds, bounds, ls="--", c=".3")
plt.show()
```
### Graph Visualizations
```
# Here we visualize predictions on a per county level.
# The blue lines are the true number of deaths, and the dots are our predictions for each model for those days.
def plot_prediction(row):
"""
Plots model predictions vs actual
row: dataframe row
window: autoregressive window size
"""
gold_key = outcome_type
for i,val in enumerate(row[gold_key]):
if val > 0:
start_point = i
break
# plt.plot(row[gold_key][start_point:], label=gold_key)
if len(row[gold_key][start_point:]) < 3:
return
sns.lineplot(list(range(len(row[gold_key][start_point:]))),row[gold_key][start_point:], label=gold_key)
for key in method_keys:
preds = row[key]
sns.scatterplot(list(range(len(row[gold_key][start_point:])))[-len(preds):],preds,label=method_name_to_pretty_name(key))
# plt.scatter(list(range(len(row[gold_key][start_point:])))[-len(preds):],preds,label=key)
# plt.legend()
# plt.show()
# sns.legend()
plt.title(row['CountyName']+' in '+row['StateNameAbbreviation'])
plt.ylabel(outcome_type)
plt.xlabel('Days since first death')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.figure(dpi=500)
plt.show()
# feature_vals = {
# 'PopulationDensityperSqMile2010' : 1.1525491065255939e-05,
# "MedicareEnrollment,AgedTot2017" : -2.119520577282583e-06,
# 'PopulationEstimate2018' : 2.8898343032154275e-07,
# '#ICU_beds' : -0.000647030727828718,
# 'MedianAge2010' : 0.05032666600339253,
# 'Smokers_Percentage' : -0.013410742818946319,
# 'DiabetesPercentage' : 0.04395318355581005,
# 'HeartDiseaseMortality' : 0.0015473771787186525,
# '#Hospitals': 0.019248102357644396,
# 'log(deaths)' : 0.8805209010821442,
# 'bias' : -1.871552103871495
# }
df = df.sort_values(by='max_deaths',ascending=False)
for i in range(len(df)):
row = df.iloc[i]
# If number of deaths greater than 10
if max(row['deaths']) > 10:
print(row['CountyName']+' in '+row['StateNameAbbreviation'])
plot_prediction(row)
for v in very_important_vars:
print(v+ ': '+str(row[v])) #+';\t contrib: '+ str(feature_vals[v]*float(row[v])))
print('\n')
```
| github_jupyter |
```
#IMPORT ALL LIBRARIES
#IMPORT THE PANDAS LIBRARY
import pandas as pd
#IMPORT THE POSTGRESQL LIBRARIES
from sqlalchemy import create_engine
import psycopg2
#IMPORT THE CHART LIBRARY
from matplotlib import pyplot as plt
from matplotlib import style
#IMPORT THE BASE PATH LIBRARIES
import os
import io
#IMPORT THE PDF LIBRARY
from fpdf import FPDF
#IMPORT THE LIBRARY TO ENCODE CHARTS AS BASE64
import base64
#IMPORT THE EXCEL LIBRARY
import xlsxwriter
#FUNCTION TO UPLOAD DATA FROM CSV TO POSTGRESQL
def uploadToPSQL(columns, table, filePath, engine):
#READ THE CSV FILE
df = pd.read_csv(
os.path.abspath(filePath),
names=columns,
keep_default_na=False
)
#EMPTY FIELDS ARE FILTERED OUT HERE
df.fillna('')
#DROP THE COLUMNS THAT ARE NOT USED
del df['kategori']
del df['jenis']
del df['pengiriman']
del df['satuan']
#MOVE THE DATA FROM THE CSV INTO POSTGRESQL
df.to_sql(
table,
engine,
if_exists='replace'
)
#IF THE UPLOADED DATA IS NOT EMPTY, RETURN TRUE; OTHERWISE RETURN FALSE
if len(df) == 0:
return False
else:
return True
#FUNCTION TO BUILD THE CHARTS; THE DATA IS PULLED FROM THE DATABASE, ORDERED BY DATE AND LIMITED
#THIS FUNCTION ALSO CALLS THE MAKEEXCEL AND MAKEPDF FUNCTIONS
def makeChart(host, username, password, db, port, table, judul, columns, filePath, name, subjudul, limit, negara, basePath):
#TEST THE DATABASE CONNECTION
try:
#CONNECT TO THE DATABASE
connection = psycopg2.connect(user=username,password=password,host=host,port=port,database=db)
cursor = connection.cursor()
#FETCH DATA FROM THE TABLE DEFINED BELOW, ORDERED BY DATE
#A LIMIT CAN BE ADDED SO THAT THE QUERY DOES NOT PULL TOO MUCH DATA AND BECOME SLOW
postgreSQL_select_Query = "SELECT * FROM "+table+" ORDER BY tanggal ASC LIMIT " + str(limit)
cursor.execute(postgreSQL_select_Query)
mobile_records = cursor.fetchall()
uid = []
lengthx = []
lengthy = []
#LOOP OVER THE FETCHED ROWS
#AND APPEND THE VALUES TO THE VARIABLES ABOVE
for row in mobile_records:
uid.append(row[0])
lengthx.append(row[1])
if row[2] == "":
lengthy.append(float(0))
else:
lengthy.append(float(row[2]))
#BUILD THE CHARTS
#bar
style.use('ggplot')
fig, ax = plt.subplots()
#PASS IN THE ID DATA FROM THE DATABASE ALONG WITH THE DATE DATA
ax.bar(uid, lengthy, align='center')
#TITLE OF THE CHART
ax.set_title(judul)
ax.set_ylabel('Total')
ax.set_xlabel('Tanggal')
ax.set_xticks(uid)
#THE TOTALS FETCHED FROM THE DATABASE GO HERE
ax.set_xticklabels((lengthx))
b = io.BytesIO()
#SAVE THE CHART AS PNG
plt.savefig(b, format='png', bbox_inches="tight")
#THE PNG CHART IS CONVERTED TO BASE64 HERE
barChart = base64.b64encode(b.getvalue()).decode("utf-8").replace("\n", "")
#SHOW THE CHART
plt.show()
#line
#PASS IN THE DATA FROM THE DATABASE
plt.plot(lengthx, lengthy)
plt.xlabel('Tanggal')
plt.ylabel('Total')
#TITLE OF THE CHART
plt.title(judul)
plt.grid(True)
l = io.BytesIO()
#SAVE THE CHART AS PNG
plt.savefig(l, format='png', bbox_inches="tight")
#THE PNG CHART IS CONVERTED TO BASE64 HERE
lineChart = base64.b64encode(l.getvalue()).decode("utf-8").replace("\n", "")
#SHOW THE CHART
plt.show()
#pie
#TITLE OF THE CHART
plt.title(judul)
#PASS IN THE DATA FROM THE DATABASE
plt.pie(lengthy, labels=lengthx, autopct='%1.1f%%',
shadow=True, startangle=180)
plt.axis('equal')
p = io.BytesIO()
#SAVE THE CHART AS PNG
plt.savefig(p, format='png', bbox_inches="tight")
#THE PNG CHART IS CONVERTED TO BASE64 HERE
pieChart = base64.b64encode(p.getvalue()).decode("utf-8").replace("\n", "")
#SHOW THE CHART
plt.show()
#READ THE CSV AGAIN; IT IS USED AS THE TABLE HEADER FOR THE EXCEL FILE AND THE PDF
header = pd.read_csv(
os.path.abspath(filePath),
names=columns,
keep_default_na=False
)
#DROP THE COLUMNS THAT ARE NOT USED
header.fillna('')
del header['tanggal']
del header['total']
#CALL THE EXCEL FUNCTION
makeExcel(mobile_records, header, name, limit, basePath)
#CALL THE PDF FUNCTION
makePDF(mobile_records, header, judul, barChart, lineChart, pieChart, name, subjudul, limit, basePath)
#IF THE DATABASE CONNECTION FAILS, THE ERROR IS PRINTED HERE
except (Exception, psycopg2.Error) as error :
print (error)
#CLOSE THE CONNECTION
finally:
if(connection):
cursor.close()
connection.close()
#THE MAKEEXCEL FUNCTION TURNS THE DATA FROM THE DATABASE INTO AN EXCEL TABLE (FORMAT F2)
#THE PLUGIN USED IS XLSXWRITER
def makeExcel(datarow, dataheader, name, limit, basePath):
#CREATE THE EXCEL FILE
workbook = xlsxwriter.Workbook(basePath+'jupyter/BLOOMBERG/SektorEksternal/excel/'+name+'.xlsx')
#ADD A WORKSHEET TO THE EXCEL FILE
worksheet = workbook.add_worksheet('sheet1')
#FORMAT SETTINGS: ADD A BORDER AND MAKE THE FONT BOLD
row1 = workbook.add_format({'border': 2, 'bold': 1})
row2 = workbook.add_format({'border': 2})
#CONVERT THE DATA INTO LISTS
data=list(datarow)
isihead=list(dataheader.values)
header = []
body = []
#LOOP OVER THE DATA AND COLLECT IT INTO THE VARIABLES ABOVE
for rowhead in dataheader:
header.append(str(rowhead))
for rowhead2 in datarow:
header.append(str(rowhead2[1]))
for rowbody in isihead[1]:
body.append(str(rowbody))
for rowbody2 in data:
body.append(str(rowbody2[2]))
#WRITE THE DATA FROM THE VARIABLES ABOVE INTO THE EXCEL ROWS AND COLUMNS
for col_num, data in enumerate(header):
worksheet.write(0, col_num, data, row1)
for col_num, data in enumerate(body):
worksheet.write(1, col_num, data, row2)
#CLOSE THE EXCEL FILE
workbook.close()
#FUNGSI UNTUK MEMBUAT PDF YANG DATANYA BERASAL DARI DATABASE DIJADIKAN FORMAT EXCEL TABLE F2
#PLUGIN YANG DIGUNAKAN ADALAH FPDF
def makePDF(datarow, dataheader, judul, bar, line, pie, name, subjudul, lengthPDF, basePath):
#FUNGSI UNTUK MENGATUR UKURAN KERTAS, DISINI MENGGUNAKAN UKURAN A4 DENGAN POSISI LANDSCAPE
pdf = FPDF('L', 'mm', [210,297])
#MENAMBAHKAN HALAMAN PADA PDF
pdf.add_page()
#PENGATURAN UNTUK JARAK PADDING DAN JUGA UKURAN FONT
pdf.set_font('helvetica', 'B', 20.0)
pdf.set_xy(145.0, 15.0)
#MEMASUKAN JUDUL KE DALAM PDF
pdf.cell(ln=0, h=2.0, align='C', w=10.0, txt=judul, border=0)
#PENGATURAN UNTUK UKURAN FONT DAN JUGA JARAK PADDING
pdf.set_font('arial', '', 14.0)
pdf.set_xy(145.0, 25.0)
#MEMASUKAN SUB JUDUL KE PDF
pdf.cell(ln=0, h=2.0, align='C', w=10.0, txt=subjudul, border=0)
#MEMBUAT GARIS DI BAWAH SUB JUDUL
pdf.line(10.0, 30.0, 287.0, 30.0)
pdf.set_font('times', '', 10.0)
pdf.set_xy(17.0, 37.0)
# Set the font size and padding
pdf.set_font('Times','',10.0)
# Get the PDF header data defined above
datahead=list(dataheader.values)
pdf.set_font('Times','B',12.0)
pdf.ln(0.5)
th1 = pdf.font_size
# Build the table in the PDF and fill it with the data passed in
pdf.cell(100, 2*th1, "Kategori", border=1, align='C')
pdf.cell(177, 2*th1, datahead[0][0], border=1, align='C')
pdf.ln(2*th1)
pdf.cell(100, 2*th1, "Jenis", border=1, align='C')
pdf.cell(177, 2*th1, datahead[0][1], border=1, align='C')
pdf.ln(2*th1)
pdf.cell(100, 2*th1, "Pengiriman", border=1, align='C')
pdf.cell(177, 2*th1, datahead[0][2], border=1, align='C')
pdf.ln(2*th1)
pdf.cell(100, 2*th1, "Satuan", border=1, align='C')
pdf.cell(177, 2*th1, datahead[0][3], border=1, align='C')
pdf.ln(2*th1)
# Set the padding
pdf.set_xy(17.0, 75.0)
# Set the font size and padding
pdf.set_font('Times','B',11.0)
data=list(datarow)
epw = pdf.w - 2*pdf.l_margin
col_width = epw/(lengthPDF+1)
# Set the padding
pdf.ln(0.5)
th = pdf.font_size
# Write the header data passed in above into the PDF
pdf.cell(50, 2*th, str("Negara"), border=1, align='C')
for row in data:
pdf.cell(40, 2*th, str(row[1]), border=1, align='C')
pdf.ln(2*th)
# Write the body data passed in above into the PDF
pdf.set_font('Times','B',10.0)
pdf.set_font('Arial','',9)
pdf.cell(50, 2*th, negara, border=1, align='C')
for row in data:
pdf.cell(40, 2*th, str(row[2]), border=1, align='C')
pdf.ln(2*th)
# Decode the chart data and save each chart as a PNG in the directory below
#BAR CHART
bardata = base64.b64decode(bar)
barname = basePath+'jupyter/BLOOMBERG/SektorEksternal/img/'+name+'-bar.png'
with open(barname, 'wb') as f:
f.write(bardata)
#LINE CHART
linedata = base64.b64decode(line)
linename = basePath+'jupyter/BLOOMBERG/SektorEksternal/img/'+name+'-line.png'
with open(linename, 'wb') as f:
f.write(linedata)
#PIE CHART
piedata = base64.b64decode(pie)
piename = basePath+'jupyter/BLOOMBERG/SektorEksternal/img/'+name+'-pie.png'
with open(piename, 'wb') as f:
f.write(piedata)
# Set the font size and padding
pdf.set_xy(17.0, 75.0)
col = pdf.w - 2*pdf.l_margin
widthcol = col/3
# Place the chart images from the directory above
pdf.image(barname, link='', type='',x=8, y=100, w=widthcol)
pdf.set_xy(17.0, 75.0)
col = pdf.w - 2*pdf.l_margin
pdf.image(linename, link='', type='',x=103, y=100, w=widthcol)
pdf.set_xy(17.0, 75.0)
col = pdf.w - 2*pdf.l_margin
pdf.image(piename, link='', type='',x=195, y=100, w=widthcol)
pdf.ln(2*th)
# Write out the PDF file
pdf.output(basePath+'jupyter/BLOOMBERG/SektorEksternal/pdf/'+name+'.pdf', 'F')
# This is where the variables are defined before they are passed to the functions
# First call uploadToPSQL; if it succeeds, call makeChart,
# which in turn calls makeExcel and makePDF
# Define the columns based on the CSV fields
columns = [
"kategori",
"jenis",
"tanggal",
"total",
"pengiriman",
"satuan",
]
# File name
name = "SektorEksternal2_1"
# Database connection settings
host = "localhost"
username = "postgres"
password = "1234567890"
port = "5432"
database = "bloomberg_sektoreksternal"
table = name.lower()
# Title used in the PDF and Excel output
judul = "Data Sektor Eksternal"
subjudul = "Badan Perencanaan Pembangunan Nasional"
# Row limit for the database SELECT
limitdata = int(8)
# Country name shown in the Excel and PDF output
negara = "Indonesia"
#BASE PATH DIRECTORY
basePath = 'C:/Users/ASUS/Documents/bappenas/'
# CSV file
filePath = basePath+ 'data mentah/BLOOMBERG/SektorEksternal/' +name+'.csv';
# Connect to the database
engine = create_engine('postgresql://'+username+':'+password+'@'+host+':'+port+'/'+database)
# Call the upload-to-PostgreSQL function
checkUpload = uploadToPSQL(columns, table, filePath, engine)
# Check the result of the PostgreSQL upload: if it succeeded, go on to build the charts; if it failed, print an error message
if checkUpload == True:
makeChart(host, username, password, database, port, table, judul, columns, filePath, name, subjudul, limitdata, negara, basePath)
else:
print("Error When Upload CSV")
```
| github_jupyter |
```
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
# Set random seed for reproducibility
manualSeed = 999
print("Random Seed: ", manualSeed)
random.seed(manualSeed)
torch.manual_seed(manualSeed)
# Root directory for dataset
dataroot = "./data"
# Number of workers for dataloader
workers = 2
# Batch size during training
batch_size = 64
# Spatial size of training images. All images will be resized to this
# size using a transformer.
image_size = 32
# Number of channels in the training images. For color images this is 3
nc = 3
# Size of z latent vector (i.e. size of generator input)
nz = 100
# Size of feature maps in generator
ngf = 64
# Size of feature maps in discriminator
ndf = 64
# Number of training epochs
num_epochs = 20
# Learning rate for optimizers
lr = 0.0002
# Beta1 hyperparam for Adam optimizers
beta1 = 0.5
# Number of GPUs available. Use 0 for CPU mode.
ngpu = 1
# Create the dataset
dataset = dset.CIFAR10(
root=dataroot,
download=True,
transform=transforms.Compose([
transforms.Resize((image_size, image_size)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
shuffle=True, num_workers=workers)
# Decide which device we want to run on
device = torch.device("cuda:0" if (torch.cuda.is_available() and ngpu > 0) else "cpu")
# Plot some training images
real_batch = next(iter(dataloader))
plt.figure(figsize=(8,8))
plt.axis("off")
plt.title("Training Images")
plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=2, normalize=True).cpu(),(1,2,0)))
plt.show()
```
## The Generator
The generator, G, is designed to map the latent space vector (z) to data-space. Since our data are images, converting z to data-space means ultimately creating an RGB image with the same size as the training images (i.e. 3x32x32). In practice, this is accomplished through a series of strided two-dimensional convolutional transpose layers, each paired with a 2d batch norm layer and a ReLU activation. The output of the generator is fed through a tanh function to return it to the input data range of [−1,1]. It is worth noting the existence of the batch norm functions after the conv-transpose layers, as this is a critical contribution of the DCGAN paper. These layers help with the flow of gradients during training. An illustration of the generator architecture can be found in the DCGAN paper.
```
# Generator Code
class Generator(nn.Module):
def __init__(self, ngpu):
super(Generator, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
# state size. (ngf) x 32 x 32
nn.ConvTranspose2d(ngf, nc, kernel_size=1, stride=1, padding=0, bias=False),
nn.Tanh()
)
def forward(self, input):
return self.main(input)
# Create the generator
netG = Generator(ngpu).to(device)
# Print the model
print(netG)
# The input to the DCGAN generator is a variable of shape (1, 100, 1, 1).
# There is nothing special about this shape and you can change it to other sizes
# by modifying the `nz` variable (e.g. 128, 200, etc.).
# Let's check that the GAN generates an image with the correct shape (1, 3, 32, 32)
input_variable = torch.randn((1, 100, 1, 1, )).to(device)
netG(input_variable).shape
```
## The Discriminator
As mentioned, the discriminator, D, is a binary classification network that takes an image as input and outputs a scalar probability that the input image is real (as opposed to fake). Here, D takes a 3x32x32 input image, processes it through a series of Conv2d, BatchNorm2d, and LeakyReLU layers, and outputs the final probability through a Sigmoid activation function. This architecture can be extended with more layers if necessary for the problem, but there is significance to the use of the strided convolution, BatchNorm, and LeakyReLUs. The DCGAN paper mentions it is a good practice to use strided convolution rather than pooling to downsample because it lets the network learn its own pooling function. Batch norm and leaky ReLU functions also promote healthy gradient flow, which is critical for the learning process of both G and D.
```
class Discriminator(nn.Module):
def __init__(self, ngpu):
super(Discriminator, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is (nc) x 32 x 32
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf) x 16 x 16
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*2) x 8 x 8
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*4) x 4 x 4
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 2 x 2
nn.Conv2d(ndf * 8, 1, 2, 2, 0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
return self.main(input)
# Create the Discriminator
netD = Discriminator(ngpu).to(device)
# Print the model
print(netD)
# The Discriminator is the model that should predict a single number from an input image.
# This number is the probability of the input being fake.
# Let's check that the Discriminator returns a single number for an input of size (1, 3, 32, 32)
input_variable = torch.randn((1, 3, 32, 32, )).to(device)
netD(input_variable)
# Initialize BCELoss function
# This is the loss function used in DCGAN
criterion = nn.BCELoss()
# Create batch of latent vectors that we will use to visualize
# the progression of the generator
fixed_noise = torch.randn(64, nz, 1, 1, device=device)
# Establish convention for real and fake labels during training
real_label = 1
fake_label = 0
# Setup Adam optimizers for both G and D
optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))
# Training Loop
# Lists to keep track of progress
img_list = []
G_losses = []
D_losses = []
iters = 0
print("Starting Training Loop...")
# For each epoch
for epoch in range(num_epochs):
# For each batch in the dataloader
for i, data in enumerate(dataloader, 0):
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
## Train with all-real batch
netD.zero_grad()
# Format batch
real_cpu = data[0].to(device)
b_size = real_cpu.size(0)
label = torch.full((b_size,), real_label, dtype=torch.float, device=device)
# Forward pass real batch through D
output = netD(real_cpu).view(-1)
# Calculate loss on all-real batch
errD_real = criterion(output, label)
# Calculate gradients for D in backward pass
errD_real.backward()
D_x = output.mean().item()
## Train with all-fake batch
# Generate batch of latent vectors
noise = torch.randn(b_size, nz, 1, 1, device=device)
# Generate fake image batch with G
fake = netG(noise)
label.fill_(fake_label)
# Classify all fake batch with D
output = netD(fake.detach()).view(-1)
# Calculate D's loss on the all-fake batch
errD_fake = criterion(output, label)
# Calculate the gradients for this batch
errD_fake.backward()
D_G_z1 = output.mean().item()
# Add the gradients from the all-real and all-fake batches
errD = errD_real + errD_fake
# Update D
optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
netG.zero_grad()
label.fill_(real_label) # fake labels are real for generator cost
# Since we just updated D, perform another forward pass of all-fake batch through D
output = netD(fake).view(-1)
# Calculate G's loss based on this output
errG = criterion(output, label)
# Calculate gradients for G
errG.backward()
D_G_z2 = output.mean().item()
# Update G
optimizerG.step()
# Output training stats
if i % 50 == 0:
print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
% (epoch+1, num_epochs, i, len(dataloader),
errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
# Save Losses for plotting later
G_losses.append(errG.item())
D_losses.append(errD.item())
# Check how the generator is doing by saving G's output on fixed_noise
if (iters % 500 == 0) or ((epoch == num_epochs-1) and (i == len(dataloader)-1)):
with torch.no_grad():
fake = netG(fixed_noise).detach().cpu()
img_list.append(vutils.make_grid(fake, padding=2, normalize=True))
iters += 1
plt.figure(figsize=(10,5))
plt.title("Generator and Discriminator Loss During Training")
plt.plot(G_losses,label="G")
plt.plot(D_losses,label="D")
plt.xlabel("iterations")
plt.ylabel("Loss")
plt.legend()
plt.show()
#%%capture
fig = plt.figure(figsize=(8,8))
plt.axis("off")
ims = [[plt.imshow(np.transpose(i,(1,2,0)), animated=True)] for i in img_list]
ani = animation.ArtistAnimation(fig, ims, interval=1000, repeat_delay=1000, blit=True)
HTML(ani.to_jshtml())
# Grab a batch of real images from the dataloader
real_batch = next(iter(dataloader))
# Plot the real images
plt.figure(figsize=(15,15))
plt.subplot(1,2,1)
plt.axis("off")
plt.title("Real Images")
plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=5, normalize=True).cpu(),(1,2,0)))
# Plot the fake images from the last epoch
plt.subplot(1,2,2)
plt.axis("off")
plt.title("Fake Images")
plt.imshow(np.transpose(img_list[-1],(1,2,0)))
plt.show()
```
# Task
1) Train for longer to see how good the results get
2) Modify this model to take torchvision.datasets.SVHN as input
3) Modify this model to take torchvision.datasets.MNIST as input (a sketch of the dataset change for tasks 2 and 3 is shown below)
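As a starting point for tasks 2 and 3, here is a minimal sketch of the dataset change for MNIST. It reuses `dataroot`, `image_size`, `batch_size` and `workers` from above; treat it as a sketch rather than a full solution, since `netG` and `netD` must be re-created after changing `nc`:
```
# Sketch: swap CIFAR10 for MNIST (task 3). MNIST images are grayscale,
# so the number of channels drops from 3 to 1.
nc = 1
dataset = dset.MNIST(
    root=dataroot,
    download=True,
    transform=transforms.Compose([
        transforms.Resize((image_size, image_size)),
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,)),  # one mean/std per channel
    ])
)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                         shuffle=True, num_workers=workers)
# For task 2, torchvision.datasets.SVHN works the same way but keeps nc = 3
# (it takes split='train' instead of train=True).
```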
| github_jupyter |
# SAMUR Emergency Frequencies
This notebook explores how the frequency of different types of emergency changes with time in relation to different periods (hours of the day, days of the week, months of the year...) and locations in Madrid. This will be useful for constructing a realistic emergency generator in the city simulation.
Let's start with some imports and setup, and then read the table.
```
import pandas as pd
import datetime
import matplotlib.pyplot as plt
import yaml
%matplotlib inline
df = pd.read_csv("../data/emergency_data.csv")
df.head()
```
The column for the time of the call is a string, so let's change that into a timestamp.
```
df["time_call"] = pd.to_datetime(df["Solicitud"])
```
We will also need to assign a numerical code to each district of the city in order to properly vectorize the distribution and make it easier to work with other parts of the project.
```
district_codes = {
'Centro': 1,
'Arganzuela': 2,
'Retiro': 3,
'Salamanca': 4,
'Chamartín': 5,
'Tetuán': 6,
'Chamberí': 7,
'Fuencarral - El Pardo': 8,
'Moncloa - Aravaca': 9,
'Latina': 10,
'Carabanchel': 11,
'Usera': 12,
'Puente de Vallecas': 13,
'Moratalaz': 14,
'Ciudad Lineal': 15,
'Hortaleza': 16,
'Villaverde': 17,
'Villa de Vallecas': 18,
'Vicálvaro': 19,
'San Blas - Canillejas': 20,
'Barajas': 21,
}
df["district_code"] = df.Distrito.apply(lambda x: district_codes[x])
```
Each emergency has already been assigned a severity level, depending on the nature of the reported emergency.
```
df["severity"] = df["Gravedad"]
```
We also need the hour, weekday and month of the event in order to assign it in the various distributions.
```
df["hour"] = df["time_call"].apply(lambda x: x.hour) # From 0 to 23
df["weekday"] = df["time_call"].apply(lambda x: x.weekday()+1) # From 1 (Mon) to 7 (Sun)
df["month"] = df["time_call"].apply(lambda x: x.month)
```
Let's also strip down the dataset to just the columns we need right now.
```
df = df[["district_code", "severity", "time_call", "hour", "weekday", "month"]]
df.head()
```
We are going to group the distributions by severity.
```
emergencies_per_grav = df.severity.value_counts().sort_index().rename("total_emergencies")
emergencies_per_grav
```
We will also need the global frequency of the emergencies:
```
total_seconds = (df.time_call.max()-df.time_call.min()).total_seconds()
frequencies_per_grav = (emergencies_per_grav / total_seconds).rename("emergency_frequencies")
frequencies_per_grav
```
Each emergency will need to be assigned a district. Assuming that emergencies are distributed independently across districts and time, each one will be assigned to a district according to a global probability based on this dataset, as follows.
```
prob_per_district = (df.district_code.value_counts().sort_index()/df.district_code.value_counts().sum()).rename("distric_weight")
prob_per_district
```
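For example, when the simulation needs to place a generated emergency, a district code can be drawn from these weights. This is a small sketch; the actual generator lives elsewhere in the project:
```
import numpy as np

# Draw one district code using the empirical district weights computed above.
sampled_district = np.random.choice(prob_per_district.index, p=prob_per_district.values)
sampled_district
```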
In order to simplify the generation of emergencies, we are going to assume that the distributions of emergencies per hour, per weekday and per month are independent, sharing no correlation. This is obviously not fully true, but it is a good approximation for the chosen time-frames.
```
hourly_dist = (df.hour.value_counts()/df.hour.value_counts().mean()).sort_index().rename("hourly_distribution")
daily_dist = (df.weekday.value_counts()/df.weekday.value_counts().mean()).sort_index().rename("daily_distribution")
monthly_dist = (df.month.value_counts()/df.month.value_counts().mean()).sort_index().rename("monthly_distribution")
```
We will actually make one of these per severity level.
This will allow us to modify the base emergency density of a given severity as follows:
```
def emergency_density(gravity, hour, weekday, month):
base_density = frequencies_per_grav[gravity]
density = base_density * hourly_dist[hour] * daily_dist[weekday] * monthly_dist[month]
return density
emergency_density(3, 12, 4, 5) # Emergency frequency for severity level 3, at 12 hours of a thursday in May
```
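As an illustration of how the simulation's generator might use this density, the sketch below draws the waiting time until the next emergency of a given severity as an exponential variable with the current rate. The function name and the simplification of holding the rate fixed over the waiting time are ours, not part of the dataset:
```
import numpy as np

def sample_next_emergency_time(severity, now):
    # Rate (emergencies per second) for this severity at the current hour, weekday and month.
    rate = emergency_density(severity, now.hour, now.weekday() + 1, now.month)
    # Waiting time of a Poisson process with that (locally constant) rate.
    wait_seconds = np.random.exponential(1.0 / rate)
    return now + datetime.timedelta(seconds=wait_seconds)

sample_next_emergency_time(3, datetime.datetime(2020, 5, 14, 12, 0))
```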
In order for the model to read these distributions we will need to store them in a dict-like format, in this case YAML, which is easily readable by human or machine.
```
dists = {}
for severity in range(1, 6):
sub_df = df[df["severity"] == severity]
frequency = float(frequencies_per_grav.round(8)[severity])
hourly_dist = (sub_df.hour. value_counts()/sub_df.hour. value_counts().mean()).sort_index().round(5).to_dict()
daily_dist = (sub_df.weekday.value_counts()/sub_df.weekday.value_counts().mean()).sort_index().round(5).to_dict()
monthly_dist = (sub_df.month. value_counts()/sub_df.month. value_counts().mean()).sort_index().round(5).to_dict()
district_prob = (sub_df.district_code.value_counts()/sub_df.district_code.value_counts().sum()).sort_index().round(5).to_dict()
dists[severity] = {"frequency": frequency,
"hourly_dist": hourly_dist,
"daily_dist": daily_dist,
"monthly_dist": monthly_dist,
"district_prob": district_prob}
f = open("../data/distributions.yaml", "w+")
yaml.dump(dists, f, allow_unicode=True)
f.close()
```
We can now check that the dictionary stored in the YAML file is the same one we have created.
```
with open("../data/distributions.yaml") as dist_file:
yaml_dict = yaml.safe_load(dist_file)
yaml_dict == dists
```
| github_jupyter |
```
import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
import re
import time
import collections
import os
def build_dataset(words, n_words, atleast=1):
count = [['PAD', 0], ['GO', 1], ['EOS', 2], ['UNK', 3]]
counter = collections.Counter(words).most_common(n_words)
counter = [i for i in counter if i[1] >= atleast]
count.extend(counter)
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0:
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
lines = open('movie_lines.txt', encoding='utf-8', errors='ignore').read().split('\n')
conv_lines = open('movie_conversations.txt', encoding='utf-8', errors='ignore').read().split('\n')
id2line = {}
for line in lines:
_line = line.split(' +++$+++ ')
if len(_line) == 5:
id2line[_line[0]] = _line[4]
convs = [ ]
for line in conv_lines[:-1]:
_line = line.split(' +++$+++ ')[-1][1:-1].replace("'","").replace(" ","")
convs.append(_line.split(','))
questions = []
answers = []
for conv in convs:
for i in range(len(conv)-1):
questions.append(id2line[conv[i]])
answers.append(id2line[conv[i+1]])
def clean_text(text):
text = text.lower()
text = re.sub(r"i'm", "i am", text)
text = re.sub(r"he's", "he is", text)
text = re.sub(r"she's", "she is", text)
text = re.sub(r"it's", "it is", text)
text = re.sub(r"that's", "that is", text)
text = re.sub(r"what's", "that is", text)
text = re.sub(r"where's", "where is", text)
text = re.sub(r"how's", "how is", text)
text = re.sub(r"\'ll", " will", text)
text = re.sub(r"\'ve", " have", text)
text = re.sub(r"\'re", " are", text)
text = re.sub(r"\'d", " would", text)
text = re.sub(r"\'re", " are", text)
text = re.sub(r"won't", "will not", text)
text = re.sub(r"can't", "cannot", text)
text = re.sub(r"n't", " not", text)
text = re.sub(r"n'", "ng", text)
text = re.sub(r"'bout", "about", text)
text = re.sub(r"'til", "until", text)
text = re.sub(r"[-()\"#/@;:<>{}`+=~|.!?,]", "", text)
return ' '.join([i.strip() for i in filter(None, text.split())])
clean_questions = []
for question in questions:
clean_questions.append(clean_text(question))
clean_answers = []
for answer in answers:
clean_answers.append(clean_text(answer))
min_line_length = 2
max_line_length = 5
short_questions_temp = []
short_answers_temp = []
i = 0
for question in clean_questions:
if len(question.split()) >= min_line_length and len(question.split()) <= max_line_length:
short_questions_temp.append(question)
short_answers_temp.append(clean_answers[i])
i += 1
short_questions = []
short_answers = []
i = 0
for answer in short_answers_temp:
if len(answer.split()) >= min_line_length and len(answer.split()) <= max_line_length:
short_answers.append(answer)
short_questions.append(short_questions_temp[i])
i += 1
question_test = short_questions[500:550]
answer_test = short_answers[500:550]
short_questions = short_questions[:500]
short_answers = short_answers[:500]
concat_from = ' '.join(short_questions+question_test).split()
vocabulary_size_from = len(list(set(concat_from)))
data_from, count_from, dictionary_from, rev_dictionary_from = build_dataset(concat_from, vocabulary_size_from)
print('vocab from size: %d'%(vocabulary_size_from))
print('Most common words', count_from[4:10])
print('Sample data', data_from[:10], [rev_dictionary_from[i] for i in data_from[:10]])
print('filtered vocab size:',len(dictionary_from))
print("% of vocab used: {}%".format(round(len(dictionary_from)/vocabulary_size_from,4)*100))
concat_to = ' '.join(short_answers+answer_test).split()
vocabulary_size_to = len(list(set(concat_to)))
data_to, count_to, dictionary_to, rev_dictionary_to = build_dataset(concat_to, vocabulary_size_to)
print('vocab from size: %d'%(vocabulary_size_to))
print('Most common words', count_to[4:10])
print('Sample data', data_to[:10], [rev_dictionary_to[i] for i in data_to[:10]])
print('filtered vocab size:',len(dictionary_to))
print("% of vocab used: {}%".format(round(len(dictionary_to)/vocabulary_size_to,4)*100))
GO = dictionary_from['GO']
PAD = dictionary_from['PAD']
EOS = dictionary_from['EOS']
UNK = dictionary_from['UNK']
for i in range(len(short_answers)):
short_answers[i] += ' EOS'
class Chatbot:
def __init__(self, size_layer, num_layers, embedded_size,
from_dict_size, to_dict_size, batch_size,
grad_clip=5.0, beam_width=5, force_teaching_ratio=0.5):
def cells(size, reuse=False):
return tf.nn.rnn_cell.GRUCell(size, reuse=reuse)
self.X = tf.placeholder(tf.int32, [None, None])
self.Y = tf.placeholder(tf.int32, [None, None])
self.X_seq_len = tf.count_nonzero(self.X, 1, dtype=tf.int32)
self.Y_seq_len = tf.count_nonzero(self.Y, 1, dtype=tf.int32)
batch_size = tf.shape(self.X)[0]
encoder_embeddings = tf.Variable(tf.random_uniform([from_dict_size, embedded_size], -1, 1))
decoder_embeddings = tf.Variable(tf.random_uniform([to_dict_size, embedded_size], -1, 1))
self.encoder_out = tf.nn.embedding_lookup(encoder_embeddings, self.X)
def bahdanau(size):
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(num_units = size,
memory = self.encoder_out)
return tf.contrib.seq2seq.AttentionWrapper(cell = cells(size),
attention_mechanism = attention_mechanism,
attention_layer_size = size)
def luong(size):
attention_mechanism = tf.contrib.seq2seq.LuongAttention(num_units = size,
memory = self.encoder_out)
return tf.contrib.seq2seq.AttentionWrapper(cell = cells(size),
attention_mechanism = attention_mechanism,
attention_layer_size = size)
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw = bahdanau(size_layer//2),
cell_bw = luong(size_layer//2),
inputs = self.encoder_out,
sequence_length = self.X_seq_len,
dtype = tf.float32,
scope = 'bidirectional_rnn_%d'%(n))
encoder_embedded = tf.concat((out_fw, out_bw), 2)
bi_state = tf.concat((state_fw[0],state_bw[0]), -1)
encoder_state = tuple([bi_state] * num_layers)
dense = tf.layers.Dense(to_dict_size)
with tf.variable_scope('decode'):
attention_mechanism = tf.contrib.seq2seq.LuongAttention(
num_units = size_layer,
memory = self.encoder_out,
memory_sequence_length = self.X_seq_len)
luong_cells = tf.contrib.seq2seq.AttentionWrapper(
cell = tf.nn.rnn_cell.MultiRNNCell([cells(size_layer) for _ in range(num_layers)]),
attention_mechanism = attention_mechanism,
attention_layer_size = size_layer)
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
num_units = size_layer,
memory = self.encoder_out,
memory_sequence_length = self.X_seq_len)
bahdanau_cells = tf.contrib.seq2seq.AttentionWrapper(
cell = tf.nn.rnn_cell.MultiRNNCell([cells(size_layer) for _ in range(num_layers)]),
attention_mechanism = attention_mechanism,
attention_layer_size = size_layer)
decoder_cells = tf.nn.rnn_cell.MultiRNNCell([luong_cells, bahdanau_cells])
main = tf.strided_slice(self.Y, [0, 0], [batch_size, -1], [1, 1])
decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)
training_helper = tf.contrib.seq2seq.ScheduledEmbeddingTrainingHelper(
inputs = tf.nn.embedding_lookup(decoder_embeddings, decoder_input),
sequence_length = self.Y_seq_len,
embedding = decoder_embeddings,
sampling_probability = 1 - force_teaching_ratio,
time_major = False)
training_decoder = tf.contrib.seq2seq.BasicDecoder(
cell = decoder_cells,
helper = training_helper,
initial_state = decoder_cells.zero_state(batch_size, tf.float32),
output_layer = tf.layers.Dense(to_dict_size))
training_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
decoder = training_decoder,
impute_finished = True,
maximum_iterations = tf.reduce_max(self.Y_seq_len))
self.training_logits = training_decoder_output.rnn_output
with tf.variable_scope('decode', reuse=True):
encoder_out_tiled = tf.contrib.seq2seq.tile_batch(self.encoder_out, beam_width)
encoder_state_tiled = tf.contrib.seq2seq.tile_batch(encoder_state, beam_width)
X_seq_len_tiled = tf.contrib.seq2seq.tile_batch(self.X_seq_len, beam_width)
attention_mechanism = tf.contrib.seq2seq.LuongAttention(
num_units = size_layer,
memory = encoder_out_tiled,
memory_sequence_length = X_seq_len_tiled)
luong_cells = tf.contrib.seq2seq.AttentionWrapper(
cell = tf.nn.rnn_cell.MultiRNNCell([cells(size_layer,reuse=True) for _ in range(num_layers)]),
attention_mechanism = attention_mechanism,
attention_layer_size = size_layer)
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
num_units = size_layer,
memory = encoder_out_tiled,
memory_sequence_length = X_seq_len_tiled)
bahdanau_cells = tf.contrib.seq2seq.AttentionWrapper(
cell = tf.nn.rnn_cell.MultiRNNCell([cells(size_layer,reuse=True) for _ in range(num_layers)]),
attention_mechanism = attention_mechanism,
attention_layer_size = size_layer)
decoder_cells = tf.nn.rnn_cell.MultiRNNCell([luong_cells, bahdanau_cells])
predicting_decoder = tf.contrib.seq2seq.BeamSearchDecoder(
cell = decoder_cells,
embedding = decoder_embeddings,
start_tokens = tf.tile(tf.constant([GO], dtype=tf.int32), [batch_size]),
end_token = EOS,
initial_state = decoder_cells.zero_state(batch_size * beam_width, tf.float32),
beam_width = beam_width,
output_layer = tf.layers.Dense(to_dict_size, _reuse=True),
length_penalty_weight = 0.0)
predicting_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
decoder = predicting_decoder,
impute_finished = False,
maximum_iterations = 2 * tf.reduce_max(self.X_seq_len))
self.predicting_ids = predicting_decoder_output.predicted_ids[:, :, 0]
masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)
self.cost = tf.contrib.seq2seq.sequence_loss(logits = self.training_logits,
targets = self.Y,
weights = masks)
self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)
y_t = tf.argmax(self.training_logits,axis=2)
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(y_t, masks)
mask_label = tf.boolean_mask(self.Y, masks)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
size_layer = 256
num_layers = 2
embedded_size = 128
learning_rate = 0.001
batch_size = 16
epoch = 20
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Chatbot(size_layer, num_layers, embedded_size, len(dictionary_from),
len(dictionary_to), batch_size,learning_rate)
sess.run(tf.global_variables_initializer())
def str_idx(corpus, dic):
X = []
for i in corpus:
ints = []
for k in i.split():
ints.append(dic.get(k,UNK))
X.append(ints)
return X
X = str_idx(short_questions, dictionary_from)
Y = str_idx(short_answers, dictionary_to)
X_test = str_idx(question_test, dictionary_from)
Y_test = str_idx(answer_test, dictionary_to)
def pad_sentence_batch(sentence_batch, pad_int):
padded_seqs = []
seq_lens = []
max_sentence_len = max([len(sentence) for sentence in sentence_batch])
for sentence in sentence_batch:
padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence)))
seq_lens.append(len(sentence))
return padded_seqs, seq_lens
for i in range(epoch):
total_loss, total_accuracy = 0, 0
for k in range(0, len(short_questions), batch_size):
index = min(k+batch_size, len(short_questions))
batch_x, seq_x = pad_sentence_batch(X[k: index], PAD)
batch_y, seq_y = pad_sentence_batch(Y[k: index], PAD)
predicted, accuracy,loss, _ = sess.run([model.predicting_ids,
model.accuracy, model.cost, model.optimizer],
feed_dict={model.X:batch_x,
model.Y:batch_y})
total_loss += loss
total_accuracy += accuracy
total_loss /= (len(short_questions) / batch_size)
total_accuracy /= (len(short_questions) / batch_size)
print('epoch: %d, avg loss: %f, avg accuracy: %f'%(i+1, total_loss, total_accuracy))
for i in range(len(batch_x)):
print('row %d'%(i+1))
print('QUESTION:',' '.join([rev_dictionary_from[n] for n in batch_x[i] if n not in [0,1,2,3]]))
print('REAL ANSWER:',' '.join([rev_dictionary_to[n] for n in batch_y[i] if n not in[0,1,2,3]]))
print('PREDICTED ANSWER:',' '.join([rev_dictionary_to[n] for n in predicted[i] if n not in[0,1,2,3]]),'\n')
batch_x, seq_x = pad_sentence_batch(X_test[:batch_size], PAD)
batch_y, seq_y = pad_sentence_batch(Y_test[:batch_size], PAD)
predicted = sess.run(model.predicting_ids, feed_dict={model.X:batch_x})
for i in range(len(batch_x)):
print('row %d'%(i+1))
print('QUESTION:',' '.join([rev_dictionary_from[n] for n in batch_x[i] if n not in [0,1,2,3]]))
print('REAL ANSWER:',' '.join([rev_dictionary_to[n] for n in batch_y[i] if n not in[0,1,2,3]]))
print('PREDICTED ANSWER:',' '.join([rev_dictionary_to[n] for n in predicted[i] if n not in[0,1,2,3]]),'\n')
```
| github_jupyter |
# S3Fs Notebook Example
S3Fs is a Pythonic file interface to S3. It builds on top of botocore.
The top-level class S3FileSystem holds connection information and allows typical file-system style operations like cp, mv, ls, du, glob, etc., as well as put/get of local files to/from S3.
The connection can be anonymous - in which case only publicly-available, read-only buckets are accessible - or via credentials explicitly supplied or in configuration files.
API Version 2021.06.0
https://buildmedia.readthedocs.org/media/pdf/s3fs/latest/s3fs.pdf
Note: If you get errors like `ModuleNotFoundError: No module named 's3fs'`, try `pip install s3fs` in a terminal and then restart your notebook:
```
import json
import os
import s3fs
```
Load the credentials file .json to make a connection to `S3FileSystem`
```
tenant="standard"
with open(f'/vault/secrets/minio-{tenant}-tenant-1.json') as f:
creds = json.load(f)
```
Calling open() on a S3FileSystem (typically using a context manager) provides an S3File for read or write access to a particular key. The object emulates the standard File protocol (read, write, tell, seek), such that functions expecting a file can access S3.
```
HOST = creds['MINIO_URL']
SECURE = HOST.startswith('https')
fs = s3fs.S3FileSystem(
anon=False,
use_ssl=SECURE,
client_kwargs=
{
"region_name": "us-east-1",
"endpoint_url": creds['MINIO_URL'],
"aws_access_key_id": creds['AWS_ACCESS_KEY_ID'],
"aws_secret_access_key": creds['AWS_SECRET_ACCESS_KEY']
}
)
```
## Upload a file
Now that your personal bucket exists you can upload your files! We can use
`example.txt` from the same folder as this notebook.
**Note:** Bucket storage doesn't actually have real directories, so you won't
find any functions for creating them. But some software will show you a
directory structure by looking at the slashes (`/`) in the file names. We'll use
this to put `example.txt` under an `/s3fs-examples` faux directory.
```
# Desired location in the bucket
#NB_NAMESPACE: namespace of user e.g. rohan-katkar
LOCAL_FILE='example.txt'
REMOTE_FILE= os.environ['NB_NAMESPACE']+'/s3fs-examples/Happy-DAaaS-Bird.txt'
fs.put(LOCAL_FILE,REMOTE_FILE)
```
## Check path exists in bucket
```
fs.exists(os.environ['NB_NAMESPACE']+'/s3fs-examples')
```
## List objects in bucket
```
fs.ls(os.environ['NB_NAMESPACE'])
```
## List objects in path
```
x = []
x= fs.ls(os.environ['NB_NAMESPACE'] +'/s3fs-examples')
for obj in x:
print(f'Name: {obj}')
```
## Download a file
There is another method `download(rpath, lpath[, recursive])`. S3Fs has issues with this method. Get is an equivalent method.
```
from shutil import copyfileobj
DL_FILE='downloaded_s3fsexample.txt'
fs.get(os.environ['NB_NAMESPACE']+'/s3fs-examples/Happy-DAaaS-Bird.txt', DL_FILE)
with open(DL_FILE, 'r') as file:
print(file.read())
```
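The same `S3FileSystem` object also exposes the file-like `open()` interface described at the start of this notebook, which is handy when you want to stream a remote object without saving it locally. A small sketch using the file uploaded above:
```
# Read the remote object directly through the S3File interface.
with fs.open(os.environ['NB_NAMESPACE'] + '/s3fs-examples/Happy-DAaaS-Bird.txt', 'r') as remote_file:
    print(remote_file.read())
```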
# That's it!
You've seen how to upload, list, and download files. You can do more things! For
more advanced usage, check out the full API documentation for the
[S3Fs Python SDK](https://s3fs.readthedocs.io/en/latest/api.html).
And don't forget that you can also do this all on the commandline with `mc`.
| github_jupyter |
```
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
```
# Imports and Functions
```
import numpy as np
from scipy.stats import special_ortho_group
from scipy.spatial.transform import Rotation
from scipy.linalg import svd
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
FIGURE_SCALE = 1.0
FONT_SIZE = 20
plt.rcParams.update({
'figure.figsize': np.array((8, 6)) * FIGURE_SCALE,
'axes.labelsize': FONT_SIZE,
'axes.titlesize': FONT_SIZE,
'xtick.labelsize': FONT_SIZE,
'ytick.labelsize': FONT_SIZE,
'legend.fontsize': FONT_SIZE,
'lines.linewidth': 3,
'lines.markersize': 10,
})
def SO3_via_svd(A):
"""Map 3x3 matrix onto SO(3) via SVD."""
u, s, vt = np.linalg.svd(A)
s_SO3 = [1, 1, np.sign(np.linalg.det(np.matmul(u, vt)))]
return np.matmul(np.matmul(u, np.diag(s_SO3)), vt)
def SO3_via_gramschmidt(A):
"""Map 3x3 matrix on SO(3) via GS, ignores last column."""
x_normalized = A[:, 0] / np.linalg.norm(A[:, 0])
z = np.cross(x_normalized, A[:, 1])
z_normalized = z / np.linalg.norm(z)
y_normalized = np.cross(z_normalized, x_normalized)
return np.stack([x_normalized, y_normalized, z_normalized], axis=1)
def rotate_from_z(v):
"""Construct a rotation matrix R such that R * [0,0,||v||]^T = v.
Input v is shape (3,), output shape is 3x3 """
vn = v / np.linalg.norm(v)
theta = np.arccos(vn[2])
phi = np.arctan2(vn[1], vn[0])
r = Rotation.from_euler('zyz', [0, theta, phi])
R = np.squeeze(r.as_dcm()) # Maps Z to vn
return R
def perturb_rotation_matrix(R, kappa):
"""Perturb a random rotation matrix with noise.
Noise is random small rotation applied to each of the three
column vectors of R. Angle of rotation is sampled from the
von-Mises distribution on the circle (with uniform random azimuth).
The von Mises distribution is analogous to a Gaussian distribution on the circle.
Note, the concentration parameter kappa is inversely related to variance,
so higher kappa means less variance, less noise applied. Good ranges for
kappa are 64 (high noise) up to 512 (low noise).
"""
R_perturb = []
theta = np.random.vonmises(mu=0.0, kappa=kappa, size=(3,))
phi = np.random.uniform(low=0.0, high=np.pi*2.0, size=(3,))
for i in range(3):
v = R[:, i]
R_z_to_v = rotate_from_z(v)
r_noise_z = np.squeeze(Rotation.from_euler('zyz', [0, theta[i], phi[i]]).as_dcm())
v_perturb = np.matmul(R_z_to_v, np.matmul(r_noise_z, np.array([0,0,1])))
R_perturb.append(v_perturb)
R_perturb = np.stack(R_perturb, axis=-1)
return R_perturb
def sigma_to_kappa(sigma):
return ((0.5 - sigma) * 1024) + 64
# We create a ground truth special orthogonal matrix and perturb it with
# additive noise. We then see which orthogonalization process (SVD or GS) is
# better at recovering the ground truth matrix.
def run_expt(sigmas, num_trials, noise_type='gaussian'):
# Always use identity as ground truth, or pick random matrix.
# Nothing should change if we pick random (can verify by setting to True) since
# SVD and Gram-Schmidt are both Equivariant to rotations.
pick_random_ground_truth=False
all_errs_svd = []
all_errs_gs = []
all_geo_errs_svd = []
all_geo_errs_gs = []
all_noise_norms = []
all_noise_sq_norms = []
for sig in sigmas:
svd_errors = np.zeros(num_trials)
gs_errors = np.zeros(num_trials)
svd_geo_errors = np.zeros(num_trials)
gs_geo_errors = np.zeros(num_trials)
noise_norms = np.zeros(num_trials)
noise_sq_norms = np.zeros(num_trials)
for t in range(num_trials):
if pick_random_ground_truth:
A = special_ortho_group.rvs(3) # Pick a random ground truth matrix
else:
A = np.eye(3) # Our ground truth matrix in SO(3)
N = None
if noise_type == 'gaussian':
N = np.random.standard_normal(size=(3,3)) * sig
if noise_type == 'uniform':
N = np.random.uniform(-1, 1, (3, 3)) * sig
if noise_type == 'rademacher':
N = np.sign(np.random.uniform(-1, 1, (3, 3))) * sig
if noise_type == 'rotation':
A_perturb = perturb_rotation_matrix(A, kappa=sigma_to_kappa(sig))
N = A_perturb - A
if N is None:
print ('Error: unknown noise_type: %s', noise_type)
return
AplusN = A + N # Ground-truth plus noise
noise_norm = np.linalg.norm(N)
noise_norm_sq = noise_norm**2
# Compute SVD result and error.
res_svd = SO3_via_svd(AplusN)
error_svd = np.linalg.norm(res_svd - A, ord='fro')**2
error_geodesic_svd = np.arccos(
(np.trace(np.matmul(np.transpose(res_svd), A))-1.0)/2.0);
# Compute GS result and error.
res_gs = SO3_via_gramschmidt(AplusN)
error_gs = np.linalg.norm(res_gs - A, ord='fro')**2
error_geodesic_gs = np.arccos(
(np.trace(np.matmul(np.transpose(res_gs), A))-1.0)/2.0);
svd_errors[t] = error_svd
gs_errors[t] = error_gs
svd_geo_errors[t] = error_geodesic_svd
gs_geo_errors[t] = error_geodesic_gs
noise_norms[t] = noise_norm
noise_sq_norms[t] = noise_norm_sq
all_errs_svd.append(svd_errors)
all_errs_gs.append(gs_errors)
all_geo_errs_svd.append(svd_geo_errors)
all_geo_errs_gs.append(gs_geo_errors)
all_noise_norms.append(noise_norms)
all_noise_sq_norms.append(noise_sq_norms)
print('finished sigma = %f / kappa = %f' % (sig, sigma_to_kappa(sig)))
return [np.array(x) for x in (
all_errs_svd, all_errs_gs,
all_geo_errs_svd, all_geo_errs_gs,
all_noise_norms, all_noise_sq_norms)]
boxprops = dict(linewidth=2)
medianprops = dict(linewidth=2)
whiskerprops = dict(linewidth=2)
capprops = dict(linewidth=2)
def make_diff_plot(svd_errs, gs_errs, xvalues, title='', ytitle='', xtitle=''):
plt.figure(figsize=(8,6))
plt.title(title, fontsize=16)
diff = gs_errs - svd_errs
step_size = np.abs(xvalues[1] - xvalues[0])
plt.boxplot(diff.T, positions=xvalues, widths=step_size/2, whis=[5, 95],
boxprops=boxprops, medianprops=medianprops, whiskerprops=whiskerprops, capprops=capprops,
showmeans=False, meanline=True, showfliers=False)
plt.plot(xvalues, np.max(diff, axis=1), 'kx', markeredgewidth=2)
plt.plot(xvalues, np.min(diff, axis=1), 'kx', markeredgewidth=2)
xlim = [np.min(xvalues) - (step_size / 3), np.max(xvalues) + (step_size / 3)]
plt.xlim(xlim)
plt.plot(xlim, [0, 0], 'k--', linewidth=1)
plt.xlabel(xtitle, fontsize=16)
plt.ylabel(ytitle, fontsize=16)
plt.tight_layout()
```
# Global Params
```
num_trials = 100000 # Num trials at each sigma
sigmas = np.linspace(0.125, 0.5, 4)
```
# Gaussian Noise
Here we generate a noise matrix with iid Gaussian entries drawn from
$\sigma N(0,1)$.
The "Frobenius Error Diff" shows the distributions of the error differences
$\|A - \textrm{GS}(\tilde A)\|_F^2 - \|A - \textrm{SVD}(\tilde A)\|_F^2$ for
different values of $\sigma$. The "Geodesic Error Diff" plot shows the
analogous data, but in terms of the geodesic error.
```
(all_errs_svd, all_errs_gs,
all_geo_errs_svd, all_geo_errs_gs,
all_noise_norms, all_noise_sq_norms
) = run_expt(sigmas, num_trials, noise_type='gaussian')
plt.plot(sigmas,
3*sigmas**2,
'--b',
label='3 $\\sigma^2$')
plt.errorbar(sigmas,
all_errs_svd.mean(axis=1),
color='b',
label='E[$\\|\\|\\mathrm{SVD}^+(M) - R\\|\\|_F^2]$')
plt.plot(sigmas, 6*sigmas**2,
'--r',
label='6 $\\sigma^2$')
plt.errorbar(sigmas,
all_errs_gs.mean(axis=1),
color='r',
label='E[$\\|\\|\\mathrm{GS}^+(M) - R\\|\\|_F^2$]')
plt.xlabel('$\\sigma$')
plt.legend(loc='upper left')
make_diff_plot(all_errs_svd, all_errs_gs, sigmas, title='Gaussian Noise', ytitle='Frobenius Error Diff', xtitle='$\\sigma$')
make_diff_plot(all_geo_errs_svd, all_geo_errs_gs, sigmas, title='Gaussian Noise', ytitle='Geodesic Error Diff', xtitle='$\\sigma$')
```
# Uniform Noise
Here, the noise matrix is constructed with iid entries drawn from $\sigma \textrm{Unif}(-1, 1)$.
```
(all_errs_svd, all_errs_gs,
all_geo_errs_svd, all_geo_errs_gs,
all_noise_norms, all_noise_sq_norms
) = run_expt(sigmas, num_trials, noise_type='uniform')
make_diff_plot(all_errs_svd, all_errs_gs, sigmas, title='Uniform Noise', ytitle='Frobenius Error Diff', xtitle='$\\phi$')
make_diff_plot(all_geo_errs_svd, all_geo_errs_gs, sigmas, title='Uniform Noise', ytitle='Geodesic Error Diff', xtitle='$\\phi$')
```
# Rotation Noise
```
(all_errs_svd, all_errs_gs,
all_geo_errs_svd, all_geo_errs_gs,
all_noise_norms, all_noise_sq_norms
) = run_expt(sigmas, num_trials, noise_type='rotation')
make_diff_plot(all_errs_svd, all_errs_gs, sigma_to_kappa(sigmas), title='Rotation Noise', ytitle='Frobenius Error Diff', xtitle='$\\kappa$')
make_diff_plot(all_geo_errs_svd, all_geo_errs_gs, sigma_to_kappa(sigmas), title='Rotation Noise', ytitle='Geodesic Error Diff', xtitle='$\\kappa$')
```
| github_jupyter |
# 1 - Sequence to Sequence Learning with Neural Networks
In this series we'll be building a machine learning model to go from one sequence to another, using PyTorch and torchtext. This will be done on German to English translations, but the models can be applied to any problem that involves going from one sequence to another, such as summarization, i.e. going from a sequence to a shorter sequence in the same language.
In this first notebook, we'll start simple to understand the general concepts by implementing the model from the [Sequence to Sequence Learning with Neural Networks](https://arxiv.org/abs/1409.3215) paper.
## Introduction
The most common sequence-to-sequence (seq2seq) models are *encoder-decoder* models, which commonly use a *recurrent neural network* (RNN) to *encode* the source (input) sentence into a single vector. In this notebook, we'll refer to this single vector as a *context vector*. We can think of the context vector as being an abstract representation of the entire input sentence. This vector is then *decoded* by a second RNN which learns to output the target (output) sentence by generating it one word at a time.

The above image shows an example translation. The input/source sentence, "guten morgen", is passed through the embedding layer (yellow) and then input into the encoder (green). We also append a *start of sequence* (`<sos>`) and *end of sequence* (`<eos>`) token to the start and end of sentence, respectively. At each time-step, the input to the encoder RNN is both the embedding, $e$, of the current word, $e(x_t)$, as well as the hidden state from the previous time-step, $h_{t-1}$, and the encoder RNN outputs a new hidden state $h_t$. We can think of the hidden state as a vector representation of the sentence so far. The RNN can be represented as a function of both of $e(x_t)$ and $h_{t-1}$:
$$h_t = \text{EncoderRNN}(e(x_t), h_{t-1})$$
We're using the term RNN generally here, it could be any recurrent architecture, such as an *LSTM* (Long Short-Term Memory) or a *GRU* (Gated Recurrent Unit).
Here, we have $X = \{x_1, x_2, ..., x_T\}$, where $x_1 = \text{<sos>}, x_2 = \text{guten}$, etc. The initial hidden state, $h_0$, is usually either initialized to zeros or a learned parameter.
Once the final word, $x_T$, has been passed into the RNN via the embedding layer, we use the final hidden state, $h_T$, as the context vector, i.e. $h_T = z$. This is a vector representation of the entire source sentence.
Now we have our context vector, $z$, we can start decoding it to get the output/target sentence, "good morning". Again, we append start and end of sequence tokens to the target sentence. At each time-step, the input to the decoder RNN (blue) is the embedding, $d$, of current word, $d(y_t)$, as well as the hidden state from the previous time-step, $s_{t-1}$, where the initial decoder hidden state, $s_0$, is the context vector, $s_0 = z = h_T$, i.e. the initial decoder hidden state is the final encoder hidden state. Thus, similar to the encoder, we can represent the decoder as:
$$s_t = \text{DecoderRNN}(d(y_t), s_{t-1})$$
Although the input/source embedding layer, $e$, and the output/target embedding layer, $d$, are both shown in yellow in the diagram they are two different embedding layers with their own parameters.
In the decoder, we need to go from the hidden state to an actual word, therefore at each time-step we use $s_t$ to predict (by passing it through a `Linear` layer, shown in purple) what we think is the next word in the sequence, $\hat{y}_t$.
$$\hat{y}_t = f(s_t)$$
The words in the decoder are always generated one after another, with one per time-step. We always use `<sos>` for the first input to the decoder, $y_1$, but for subsequent inputs, $y_{t>1}$, we will sometimes use the actual, ground truth next word in the sequence, $y_t$ and sometimes use the word predicted by our decoder, $\hat{y}_{t-1}$. This is called *teacher forcing*, see a bit more info about it [here](https://machinelearningmastery.com/teacher-forcing-for-recurrent-neural-networks/).
When training/testing our model, we always know how many words are in our target sentence, so we stop generating words once we hit that many. During inference it is common to keep generating words until the model outputs an `<eos>` token or after a certain amount of words have been generated.
Once we have our predicted target sentence, $\hat{Y} = \{ \hat{y}_1, \hat{y}_2, ..., \hat{y}_T \}$, we compare it against our actual target sentence, $Y = \{ y_1, y_2, ..., y_T \}$, to calculate our loss. We then use this loss to update all of the parameters in our model.
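To make the teacher forcing idea concrete, here is a toy sketch of the decoding loop. The shapes, the `toy_decoder_step` stand-in and the `teacher_forcing_ratio` name are ours for illustration; the real decoder and training loop come later in the tutorial.
```
import random
import torch

# Toy sizes, just to make the sketch self-contained.
trg_len, batch_size, output_dim = 5, 2, 10
teacher_forcing_ratio = 0.75

trg = torch.randint(0, output_dim, (trg_len, batch_size))  # ground-truth tokens, first row = <sos>
outputs = torch.zeros(trg_len, batch_size, output_dim)     # one prediction per time-step

def toy_decoder_step(tokens):
    """Stand-in for the decoder RNN + linear layer: returns fake logits."""
    return torch.randn(tokens.shape[0], output_dim)

input = trg[0, :]  # always start decoding from <sos>
for t in range(1, trg_len):
    output = toy_decoder_step(input)  # \hat{y}_t = f(s_t)
    outputs[t] = output
    # Teacher forcing: with some probability feed the true token y_t,
    # otherwise feed the decoder's own prediction \hat{y}_t.
    teacher_force = random.random() < teacher_forcing_ratio
    input = trg[t] if teacher_force else output.argmax(1)
```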
## Preparing Data
We'll be coding up the models in PyTorch and using torchtext to help us do all of the pre-processing required. We'll also be using spaCy to assist in the tokenization of the data.
```
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.legacy.datasets import Multi30k
from torchtext.legacy.data import Field, BucketIterator
import spacy
import numpy as np
import random
import math
import time
```
We'll set the random seeds for deterministic results.
```
SEED = 1234
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
```
Next, we'll create the tokenizers. A tokenizer is used to turn a string containing a sentence into a list of individual tokens that make up that string, e.g. "good morning!" becomes ["good", "morning", "!"]. We'll start talking about the sentences being a sequence of tokens from now, instead of saying they're a sequence of words. What's the difference? Well, "good" and "morning" are both words and tokens, but "!" is a token, not a word.
spaCy has a model for each language ("de_core_news_sm" for German and "en_core_web_sm" for English) which needs to be loaded so we can access the tokenizer of each model.
**Note**: the models must first be downloaded using the following on the command line:
```
python -m spacy download en_core_web_sm
python -m spacy download de_core_news_sm
```
We load the models as such:
```
spacy_de = spacy.load('de_core_news_sm')
spacy_en = spacy.load('en_core_web_sm')
```
Next, we create the tokenizer functions. These can be passed to torchtext and will take in the sentence as a string and return the sentence as a list of tokens.
In the paper we are implementing, they find it beneficial to reverse the order of the input which they believe "introduces many short term dependencies in the data that make the optimization problem much easier". We copy this by reversing the German sentence after it has been transformed into a list of tokens.
```
def tokenize_de(text):
"""
Tokenizes German text from a string into a list of strings (tokens) and reverses it
"""
return [tok.text for tok in spacy_de.tokenizer(text)][::-1]
def tokenize_en(text):
"""
Tokenizes English text from a string into a list of strings (tokens)
"""
return [tok.text for tok in spacy_en.tokenizer(text)]
```
torchtext's `Field`s handle how data should be processed. All of the possible arguments are detailed [here](https://github.com/pytorch/text/blob/master/torchtext/data/field.py#L61).
We set the `tokenize` argument to the correct tokenization function for each, with German being the `SRC` (source) field and English being the `TRG` (target) field. The field also appends the "start of sequence" and "end of sequence" tokens via the `init_token` and `eos_token` arguments, and converts all words to lowercase.
```
SRC = Field(tokenize = tokenize_de,
init_token = '<sos>',
eos_token = '<eos>',
lower = True)
TRG = Field(tokenize = tokenize_en,
init_token = '<sos>',
eos_token = '<eos>',
lower = True)
```
Next, we download and load the train, validation and test data.
The dataset we'll be using is the [Multi30k dataset](https://github.com/multi30k/dataset). This is a dataset with ~30,000 parallel English, German and French sentences, each with ~12 words per sentence.
`exts` specifies which languages to use as the source and target (source goes first) and `fields` specifies which field to use for the source and target.
```
train_data, valid_data, test_data = Multi30k.splits(exts = ('.de', '.en'),
fields = (SRC, TRG))
```
We can double check that we've loaded the right number of examples:
```
print(f"Number of training examples: {len(train_data.examples)}")
print(f"Number of validation examples: {len(valid_data.examples)}")
print(f"Number of testing examples: {len(test_data.examples)}")
```
We can also print out an example, making sure the source sentence is reversed:
```
print(vars(train_data.examples[0]))
```
The period is at the beginning of the German (src) sentence, so it looks like the sentence has been correctly reversed.
Next, we'll build the *vocabulary* for the source and target languages. The vocabulary is used to associate each unique token with an index (an integer). The vocabularies of the source and target languages are distinct.
Using the `min_freq` argument, we only allow tokens that appear at least 2 times to appear in our vocabulary. Tokens that appear only once are converted into an `<unk>` (unknown) token.
It is important to note that our vocabulary should only be built from the training set and not the validation/test set. This prevents "information leakage" into our model, giving us artifically inflated validation/test scores.
```
SRC.build_vocab(train_data, min_freq = 2)
TRG.build_vocab(train_data, min_freq = 2)
print(f"Unique tokens in source (de) vocabulary: {len(SRC.vocab)}")
print(f"Unique tokens in target (en) vocabulary: {len(TRG.vocab)}")
```
The final step of preparing the data is to create the iterators. These can be iterated on to return a batch of data which will have a `src` attribute (the PyTorch tensors containing a batch of numericalized source sentences) and a `trg` attribute (the PyTorch tensors containing a batch of numericalized target sentences). Numericalized is just a fancy way of saying they have been converted from a sequence of readable tokens to a sequence of corresponding indexes, using the vocabulary.
We also need to define a `torch.device`. This is used to tell torchText to put the tensors on the GPU or not. We use the `torch.cuda.is_available()` function, which will return `True` if a GPU is detected on our computer. We pass this `device` to the iterator.
When we get a batch of examples using an iterator we need to make sure that all of the source sentences are padded to the same length, the same with the target sentences. Luckily, torchText iterators handle this for us!
We use a `BucketIterator` instead of the standard `Iterator` as it creates batches in such a way that it minimizes the amount of padding in both the source and target sentences.
```
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
BATCH_SIZE = 128
train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size = BATCH_SIZE,
device = device)
```
## Building the Seq2Seq Model
We'll be building our model in three parts. The encoder, the decoder and a seq2seq model that encapsulates the encoder and decoder and will provide a way to interface with each.
### Encoder
First, the encoder, a 2 layer LSTM. The paper we are implementing uses a 4-layer LSTM, but in the interest of training time we cut this down to 2-layers. The concept of multi-layer RNNs is easy to expand from 2 to 4 layers.
For a multi-layer RNN, the input sentence, $X$, after being embedded goes into the first (bottom) layer of the RNN and hidden states, $H=\{h_1, h_2, ..., h_T\}$, output by this layer are used as inputs to the RNN in the layer above. Thus, representing each layer with a superscript, the hidden states in the first layer are given by:
$$h_t^1 = \text{EncoderRNN}^1(e(x_t), h_{t-1}^1)$$
The hidden states in the second layer are given by:
$$h_t^2 = \text{EncoderRNN}^2(h_t^1, h_{t-1}^2)$$
Using a multi-layer RNN also means we'll also need an initial hidden state as input per layer, $h_0^l$, and we will also output a context vector per layer, $z^l$.
Without going into too much detail about LSTMs (see [this](https://colah.github.io/posts/2015-08-Understanding-LSTMs/) blog post to learn more about them), all we need to know is that they're a type of RNN which instead of just taking in a hidden state and returning a new hidden state per time-step, also take in and return a *cell state*, $c_t$, per time-step.
$$\begin{align*}
h_t &= \text{RNN}(e(x_t), h_{t-1})\\
(h_t, c_t) &= \text{LSTM}(e(x_t), h_{t-1}, c_{t-1})
\end{align*}$$
We can just think of $c_t$ as another type of hidden state. Similar to $h_0^l$, $c_0^l$ will be initialized to a tensor of all zeros. Also, our context vector will now be both the final hidden state and the final cell state, i.e. $z^l = (h_T^l, c_T^l)$.
Extending our multi-layer equations to LSTMs, we get:
$$\begin{align*}
(h_t^1, c_t^1) &= \text{EncoderLSTM}^1(e(x_t), (h_{t-1}^1, c_{t-1}^1))\\
(h_t^2, c_t^2) &= \text{EncoderLSTM}^2(h_t^1, (h_{t-1}^2, c_{t-1}^2))
\end{align*}$$
Note how only our hidden state from the first layer is passed as input to the second layer, and not the cell state.
So our encoder looks something like this:

We create this in code by making an `Encoder` module, which requires we inherit from `torch.nn.Module` and use the `super().__init__()` as some boilerplate code. The encoder takes the following arguments:
- `input_dim` is the size/dimensionality of the one-hot vectors that will be input to the encoder. This is equal to the input (source) vocabulary size.
- `emb_dim` is the dimensionality of the embedding layer. This layer converts the one-hot vectors into dense vectors with `emb_dim` dimensions.
- `hid_dim` is the dimensionality of the hidden and cell states.
- `n_layers` is the number of layers in the RNN.
- `dropout` is the amount of dropout to use. This is a regularization parameter to prevent overfitting. Check out [this](https://www.coursera.org/lecture/deep-neural-network/understanding-dropout-YaGbR) for more details about dropout.
We aren't going to discuss the embedding layer in detail during these tutorials. All we need to know is that there is a step before the words - technically, the indexes of the words - are passed into the RNN, where the words are transformed into vectors. To read more about word embeddings, check these articles: [1](https://monkeylearn.com/blog/word-embeddings-transform-text-numbers/), [2](http://p.migdal.pl/2017/01/06/king-man-woman-queen-why.html), [3](http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/), [4](http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/).
The embedding layer is created using `nn.Embedding`, the LSTM with `nn.LSTM` and a dropout layer with `nn.Dropout`. Check the PyTorch [documentation](https://pytorch.org/docs/stable/nn.html) for more about these.
One thing to note is that the `dropout` argument to the LSTM is how much dropout to apply between the layers of a multi-layer RNN, i.e. between the hidden states output from layer $l$ and those same hidden states being used for the input of layer $l+1$.
In the `forward` method, we pass in the source sentence, $X$, which is converted into dense vectors using the `embedding` layer, and then dropout is applied. These embeddings are then passed into the RNN. As we pass a whole sequence to the RNN, it will automatically do the recurrent calculation of the hidden states over the whole sequence for us! Notice that we do not pass an initial hidden or cell state to the RNN. This is because, as noted in the [documentation](https://pytorch.org/docs/stable/nn.html#torch.nn.LSTM), if no hidden/cell state is passed to the RNN, it will automatically create an initial hidden/cell state as a tensor of all zeros.
The RNN returns: `outputs` (the top-layer hidden state for each time-step), `hidden` (the final hidden state for each layer, $h_T$, stacked on top of each other) and `cell` (the final cell state for each layer, $c_T$, stacked on top of each other).
As we only need the final hidden and cell states (to make our context vector), `forward` only returns `hidden` and `cell`.
The sizes of each of the tensors are left as comments in the code. In this implementation `n_directions` will always be 1; however, note that bidirectional RNNs (covered in tutorial 3) will have `n_directions` as 2.
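As a quick, self-contained sanity check (not part of the tutorial's model, and using arbitrary dimensions), we can confirm how `n_directions` changes the shape of the hidden state returned by `nn.LSTM`:
```
import torch
import torch.nn as nn

rnn_uni = nn.LSTM(input_size=8, hidden_size=16, num_layers=2)                      # n_directions = 1
rnn_bi = nn.LSTM(input_size=8, hidden_size=16, num_layers=2, bidirectional=True)   # n_directions = 2

x = torch.randn(5, 3, 8)   # [src len, batch size, emb dim]
_, (h_uni, _) = rnn_uni(x)
_, (h_bi, _) = rnn_bi(x)

print(h_uni.shape)  # torch.Size([2, 3, 16]) -> 2 layers * 1 direction
print(h_bi.shape)   # torch.Size([4, 3, 16]) -> 2 layers * 2 directions
```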
```
class Encoder(nn.Module):
def __init__(self, input_dim, emb_dim, hid_dim, n_layers, dropout):
super().__init__()
self.hid_dim = hid_dim
self.n_layers = n_layers
self.embedding = nn.Embedding(input_dim, emb_dim)
self.rnn = nn.LSTM(emb_dim, hid_dim, n_layers, dropout = dropout)
self.dropout = nn.Dropout(dropout)
def forward(self, src):
#src = [src len, batch size]
embedded = self.dropout(self.embedding(src))
#embedded = [src len, batch size, emb dim]
outputs, (hidden, cell) = self.rnn(embedded)
#outputs = [src len, batch size, hid dim * n directions]
#hidden = [n layers * n directions, batch size, hid dim]
#cell = [n layers * n directions, batch size, hid dim]
#outputs are always from the top hidden layer
return hidden, cell
```
### Decoder
Next, we'll build our decoder, which will also be a 2-layer (4 in the paper) LSTM.

The `Decoder` class does a single step of decoding, i.e. it outputs a single token per time-step. The first layer will receive a hidden and cell state from the previous time-step, $(s_{t-1}^1, c_{t-1}^1)$, and feeds it through the LSTM with the current embedded token, $y_t$, to produce a new hidden and cell state, $(s_t^1, c_t^1)$. The subsequent layers will use the hidden state from the layer below, $s_t^{l-1}$, and the previous hidden and cell states from their layer, $(s_{t-1}^l, c_{t-1}^l)$. This provides equations very similar to those in the encoder.
$$\begin{align*}
(s_t^1, c_t^1) = \text{DecoderLSTM}^1(d(y_t), (s_{t-1}^1, c_{t-1}^1))\\
(s_t^2, c_t^2) = \text{DecoderLSTM}^2(s_t^1, (s_{t-1}^2, c_{t-1}^2))
\end{align*}$$
Remember that the initial hidden and cell states to our decoder are our context vectors, which are the final hidden and cell states of our encoder from the same layer, i.e. $(s_0^l,c_0^l)=z^l=(h_T^l,c_T^l)$.
We then pass the hidden state from the top layer of the RNN, $s_t^L$, through a linear layer, $f$, to make a prediction of what the next token in the target (output) sequence should be, $\hat{y}_{t+1}$.
$$\hat{y}_{t+1} = f(s_t^L)$$
The arguments and initialization are similar to the `Encoder` class, except we now have an `output_dim` which is the size of the vocabulary for the output/target. There is also the addition of the `Linear` layer, used to make the predictions from the top layer hidden state.
Within the `forward` method, we accept a batch of input tokens, previous hidden states and previous cell states. As we are only decoding one token at a time, the input tokens will always have a sequence length of 1. We `unsqueeze` the input tokens to add a sentence length dimension of 1. Then, similar to the encoder, we pass through an embedding layer and apply dropout. This batch of embedded tokens is then passed into the RNN with the previous hidden and cell states. This produces an `output` (hidden state from the top layer of the RNN), a new `hidden` state (one for each layer, stacked on top of each other) and a new `cell` state (also one per layer, stacked on top of each other). We then pass the `output` (after getting rid of the sentence length dimension) through the linear layer to receive our `prediction`. We then return the `prediction`, the new `hidden` state and the new `cell` state.
**Note**: as we always have a sequence length of 1, we could use `nn.LSTMCell`, instead of `nn.LSTM`, as it is designed to handle a batch of inputs that aren't necessarily in a sequence. `nn.LSTMCell` is just a single cell and `nn.LSTM` is a wrapper around potentially multiple cells. Using the `nn.LSTMCell` in this case would mean we don't have to `unsqueeze` to add a fake sequence length dimension, but we would need one `nn.LSTMCell` per layer in the decoder and to ensure each `nn.LSTMCell` receives the correct initial hidden state from the encoder. All of this makes the code less concise - hence the decision to stick with the regular `nn.LSTM`.
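To make the alternative above concrete, here is a minimal sketch (assuming a 2-layer decoder with the same dimensions as this tutorial) of how a single decoding step could be written with one `nn.LSTMCell` per layer. It is only an illustration of the idea, not the implementation used in this tutorial.
```
import torch.nn as nn

# Sketch only: a 2-layer decoder step built from nn.LSTMCell instead of nn.LSTM.
# Each layer keeps its own (hidden, cell) pair, initialized from the encoder's final states.
emb_dim, hid_dim, n_layers = 256, 512, 2
cells = nn.ModuleList([nn.LSTMCell(emb_dim if l == 0 else hid_dim, hid_dim)
                       for l in range(n_layers)])

def decoder_step(embedded, hiddens, cell_states):
    # embedded = [batch size, emb dim] -- no fake sequence length dimension needed
    layer_input = embedded
    for l, cell in enumerate(cells):
        hiddens[l], cell_states[l] = cell(layer_input, (hiddens[l], cell_states[l]))
        layer_input = hiddens[l]  # the hidden state feeds the layer above
    return hiddens[-1], hiddens, cell_states  # top-layer hidden state is used for the prediction
```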
```
class Decoder(nn.Module):
def __init__(self, output_dim, emb_dim, hid_dim, n_layers, dropout):
super().__init__()
self.output_dim = output_dim
self.hid_dim = hid_dim
self.n_layers = n_layers
self.embedding = nn.Embedding(output_dim, emb_dim)
self.rnn = nn.LSTM(emb_dim, hid_dim, n_layers, dropout = dropout)
self.fc_out = nn.Linear(hid_dim, output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, input, hidden, cell):
#input = [batch size]
#hidden = [n layers * n directions, batch size, hid dim]
#cell = [n layers * n directions, batch size, hid dim]
#n directions in the decoder will both always be 1, therefore:
#hidden = [n layers, batch size, hid dim]
#cell = [n layers, batch size, hid dim]
input = input.unsqueeze(0)
#input = [1, batch size]
embedded = self.dropout(self.embedding(input))
#embedded = [1, batch size, emb dim]
output, (hidden, cell) = self.rnn(embedded, (hidden, cell))
#output = [seq len, batch size, hid dim * n directions]
#hidden = [n layers * n directions, batch size, hid dim]
#cell = [n layers * n directions, batch size, hid dim]
#seq len and n directions will always be 1 in the decoder, therefore:
#output = [1, batch size, hid dim]
#hidden = [n layers, batch size, hid dim]
#cell = [n layers, batch size, hid dim]
prediction = self.fc_out(output.squeeze(0))
#prediction = [batch size, output dim]
return prediction, hidden, cell
```
### Seq2Seq
For the final part of the implementation, we'll implement the seq2seq model. This will handle:
- receiving the input/source sentence
- using the encoder to produce the context vectors
- using the decoder to produce the predicted output/target sentence
Our full model will look like this:

The `Seq2Seq` model takes in an `Encoder`, `Decoder`, and a `device` (used to place tensors on the GPU, if it exists).
For this implementation, we have to ensure that the number of layers and the hidden (and cell) dimensions are equal in the `Encoder` and `Decoder`. This is not always the case; we do not necessarily need the same number of layers or the same hidden dimension sizes in a sequence-to-sequence model. However, if we did use a different number of layers, we would need to decide how to handle it. For example, if our encoder has 2 layers and our decoder only has 1, how is this handled? Do we average the two context vectors output by the encoder? Do we pass both through a linear layer? Do we only use the context vector from the highest layer? Etc.
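To make one of those options concrete, here is a minimal sketch (using hypothetical `encoder` and `decoder` objects, not the classes defined above) of keeping only the context vectors from the topmost encoder layers when the encoder has more layers than the decoder. It relies on `hidden` and `cell` being stacked with shape `[n layers, batch size, hid dim]`.
```
# Sketch only: if the encoder had more layers than the decoder, one option is to
# keep just the context vectors from the top `decoder.n_layers` encoder layers.
hidden, cell = encoder(src)            # [enc layers, batch size, hid dim]
hidden = hidden[-decoder.n_layers:]    # keep the topmost layers
cell = cell[-decoder.n_layers:]
```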
Our `forward` method takes the source sentence, target sentence and a teacher-forcing ratio. The teacher forcing ratio is used when training our model. When decoding, at each time-step we will predict what the next token in the target sequence will be from the previous tokens decoded, $\hat{y}_{t+1}=f(s_t^L)$. With probability equal to the teacher forcing ratio (`teacher_forcing_ratio`) we will use the actual ground-truth next token in the sequence as the input to the decoder during the next time-step. However, with probability `1 - teacher_forcing_ratio`, we will use the token that the model predicted as the next input to the model, even if it doesn't match the actual next token in the sequence.
The first thing we do in the `forward` method is to create an `outputs` tensor that will store all of our predictions, $\hat{Y}$.
We then feed the input/source sentence, `src`, into the encoder and receive our final hidden and cell states.
The first input to the decoder is the start of sequence (`<sos>`) token. As our `trg` tensor already has the `<sos>` token appended (all the way back when we defined the `init_token` in our `TRG` field) we get our $y_1$ by slicing into it. We know how long our target sentences should be (`trg_len`), so we loop that many times. The last token input into the decoder is the one **before** the `<eos>` token - the `<eos>` token is never input into the decoder.
During each iteration of the loop, we:
- pass the input, previous hidden and previous cell states ($y_t, s_{t-1}, c_{t-1}$) into the decoder
- receive a prediction, next hidden state and next cell state ($\hat{y}_{t+1}, s_{t}, c_{t}$) from the decoder
- place our prediction, $\hat{y}_{t+1}$/`output` in our tensor of predictions, $\hat{Y}$/`outputs`
- decide if we are going to "teacher force" or not
- if we do, the next `input` is the ground-truth next token in the sequence, $y_{t+1}$/`trg[t]`
- if we don't, the next `input` is the predicted next token in the sequence, $\hat{y}_{t+1}$/`top1`, which we get by doing an `argmax` over the output tensor
Once we've made all of our predictions, we return our tensor full of predictions, $\hat{Y}$/`outputs`.
**Note**: our decoder loop starts at 1, not 0. This means the 0th element of our `outputs` tensor remains all zeros. So our `trg` and `outputs` look something like:
$$\begin{align*}
\text{trg} = [<sos>, &y_1, y_2, y_3, <eos>]\\
\text{outputs} = [0, &\hat{y}_1, \hat{y}_2, \hat{y}_3, <eos>]
\end{align*}$$
Later on when we calculate the loss, we cut off the first element of each tensor to get:
$$\begin{align*}
\text{trg} = [&y_1, y_2, y_3, <eos>]\\
\text{outputs} = [&\hat{y}_1, \hat{y}_2, \hat{y}_3, <eos>]
\end{align*}$$
```
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder, device):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.device = device
assert encoder.hid_dim == decoder.hid_dim, \
"Hidden dimensions of encoder and decoder must be equal!"
assert encoder.n_layers == decoder.n_layers, \
"Encoder and decoder must have equal number of layers!"
def forward(self, src, trg, teacher_forcing_ratio = 0.5):
#src = [src len, batch size]
#trg = [trg len, batch size]
#teacher_forcing_ratio is probability to use teacher forcing
#e.g. if teacher_forcing_ratio is 0.75 we use ground-truth inputs 75% of the time
batch_size = trg.shape[1]
trg_len = trg.shape[0]
trg_vocab_size = self.decoder.output_dim
#tensor to store decoder outputs
outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(self.device)
#last hidden state of the encoder is used as the initial hidden state of the decoder
hidden, cell = self.encoder(src)
#first input to the decoder is the <sos> tokens
input = trg[0,:]
for t in range(1, trg_len):
#insert input token embedding, previous hidden and previous cell states
#receive output tensor (predictions) and new hidden and cell states
output, hidden, cell = self.decoder(input, hidden, cell)
#place predictions in a tensor holding predictions for each token
outputs[t] = output
#decide if we are going to use teacher forcing or not
teacher_force = random.random() < teacher_forcing_ratio
#get the highest predicted token from our predictions
top1 = output.argmax(1)
#if teacher forcing, use actual next token as next input
#if not, use predicted token
input = trg[t] if teacher_force else top1
return outputs
```
# Training the Seq2Seq Model
Now we have our model implemented, we can begin training it.
First, we'll initialize our model. As mentioned before, the input and output dimensions are defined by the size of the vocabulary. The embedding dimensions and dropout for the encoder and decoder can be different, but the number of layers and the size of the hidden/cell states must be the same.
We then define the encoder, decoder and then our Seq2Seq model, which we place on the `device`.
```
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
ENC_EMB_DIM = 256
DEC_EMB_DIM = 256
HID_DIM = 512
N_LAYERS = 2
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
enc = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)
model = Seq2Seq(enc, dec, device).to(device)
```
Next up is initializing the weights of our model. In the paper they state they initialize all weights from a uniform distribution between -0.08 and +0.08, i.e. $\mathcal{U}(-0.08, 0.08)$.
We initialize weights in PyTorch by creating a function which we `apply` to our model. When using `apply`, the `init_weights` function will be called on every module and sub-module within our model. For each module we loop through all of the parameters and sample them from a uniform distribution with `nn.init.uniform_`.
```
def init_weights(m):
for name, param in m.named_parameters():
nn.init.uniform_(param.data, -0.08, 0.08)
model.apply(init_weights)
```
We also define a function that will calculate the number of trainable parameters in the model.
```
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'The model has {count_parameters(model):,} trainable parameters')
```
We define our optimizer, which we use to update our parameters in the training loop. Check out [this](http://ruder.io/optimizing-gradient-descent/) post for information about different optimizers. Here, we'll use Adam.
```
optimizer = optim.Adam(model.parameters())
```
Next, we define our loss function. The `CrossEntropyLoss` function calculates both the log softmax as well as the negative log-likelihood of our predictions.
Our loss function calculates the average loss per token, however by passing the index of the `<pad>` token as the `ignore_index` argument we ignore the loss whenever the target token is a padding token.
```
TRG_PAD_IDX = TRG.vocab.stoi[TRG.pad_token]
criterion = nn.CrossEntropyLoss(ignore_index = TRG_PAD_IDX)
```
Next, we'll define our training loop.
First, we'll set the model into "training mode" with `model.train()`, which will turn on dropout (and batch normalization, which we aren't using). We then iterate through our data iterator.
As stated before, our decoder loop starts at 1, not 0. This means the 0th element of our `outputs` tensor remains all zeros. So our `trg` and `outputs` look something like:
$$\begin{align*}
\text{trg} = [<sos>, &y_1, y_2, y_3, <eos>]\\
\text{outputs} = [0, &\hat{y}_1, \hat{y}_2, \hat{y}_3, <eos>]
\end{align*}$$
Here, when we calculate the loss, we cut off the first element of each tensor to get:
$$\begin{align*}
\text{trg} = [&y_1, y_2, y_3, <eos>]\\
\text{outputs} = [&\hat{y}_1, \hat{y}_2, \hat{y}_3, <eos>]
\end{align*}$$
At each iteration:
- get the source and target sentences from the batch, $X$ and $Y$
- zero the gradients calculated from the last batch
- feed the source and target into the model to get the output, $\hat{Y}$
- as the loss function only works on 2d inputs with 1d targets we need to flatten each of them with `.view`
- we slice off the first column of the output and target tensors as mentioned above
- calculate the gradients with `loss.backward()`
- clip the gradients to prevent them from exploding (a common issue in RNNs)
- update the parameters of our model by doing an optimizer step
- sum the loss value to a running total
Finally, we return the loss that is averaged over all batches.
```
def train(model, iterator, optimizer, criterion, clip):
model.train()
epoch_loss = 0
for i, batch in enumerate(iterator):
src = batch.src
trg = batch.trg
optimizer.zero_grad()
output = model(src, trg)
#trg = [trg len, batch size]
#output = [trg len, batch size, output dim]
output_dim = output.shape[-1]
output = output[1:].view(-1, output_dim)
trg = trg[1:].view(-1)
#trg = [(trg len - 1) * batch size]
#output = [(trg len - 1) * batch size, output dim]
loss = criterion(output, trg)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
epoch_loss += loss.item()
return epoch_loss / len(iterator)
```
Our evaluation loop is similar to our training loop, however as we aren't updating any parameters we don't need to pass an optimizer or a clip value.
We must remember to set the model to evaluation mode with `model.eval()`. This will turn off dropout (and batch normalization, if used).
We use the `with torch.no_grad()` block to ensure no gradients are calculated within the block. This reduces memory consumption and speeds things up.
The iteration loop is similar (without the parameter updates), however we must ensure we turn teacher forcing off for evaluation. This will cause the model to only use its own predictions to make further predictions within a sentence, which mirrors how it would be used in deployment.
```
def evaluate(model, iterator, criterion):
model.eval()
epoch_loss = 0
with torch.no_grad():
for i, batch in enumerate(iterator):
src = batch.src
trg = batch.trg
output = model(src, trg, 0) #turn off teacher forcing
#trg = [trg len, batch size]
#output = [trg len, batch size, output dim]
output_dim = output.shape[-1]
output = output[1:].view(-1, output_dim)
trg = trg[1:].view(-1)
#trg = [(trg len - 1) * batch size]
#output = [(trg len - 1) * batch size, output dim]
loss = criterion(output, trg)
epoch_loss += loss.item()
return epoch_loss / len(iterator)
```
Next, we'll create a function that we'll use to tell us how long an epoch takes.
```
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
```
We can finally start training our model!
At each epoch, we'll check whether our model has achieved the best validation loss so far. If it has, we'll update our best validation loss and save the parameters of our model (called `state_dict` in PyTorch). Then, when we come to test our model, we'll use the saved parameters that achieved the best validation loss.
We'll be printing out both the loss and the perplexity at each epoch. It is easier to see a change in perplexity than a change in loss as the numbers are much bigger.
```
N_EPOCHS = 10
CLIP = 1
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
start_time = time.time()
train_loss = train(model, train_iterator, optimizer, criterion, CLIP)
valid_loss = evaluate(model, valid_iterator, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), 'tut1-model.pt')
print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')
print(f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}')
```
We'll load the parameters (`state_dict`) that gave our model the best validation loss and run the model on the test set.
```
model.load_state_dict(torch.load('tut1-model.pt'))
test_loss = evaluate(model, test_iterator, criterion)
print(f'| Test Loss: {test_loss:.3f} | Test PPL: {math.exp(test_loss):7.3f} |')
```
In the following notebook we'll implement a model that achieves improved test perplexity, but only uses a single layer in the encoder and the decoder.
| github_jupyter |
```
#!pip install pandas_profiling
#!pip install matplotlib
import sys
sys.version
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
import scipy.stats as stats
import pandas_profiling
%matplotlib inline
plt.rcParams['figure.figsize'] = 10, 7.5
plt.rcParams['axes.grid'] = True
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.cluster import KMeans
# center and scale the data
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import sklearn.metrics as metrics
# reading data into dataframe
Cust= pd.read_csv("CC_GENERAL.csv")
Cust.head()
### Exporting pandas profiling output to html file
output = pandas_profiling.ProfileReport(Cust)
output.to_file(output_file='pandas_profiling.html')
```
### Cols to drop
```
# CUST_ID,ONEOFF_PURCHASES
Cust.info()
Cust.drop(["CUST_ID","ONEOFF_PURCHASES"], axis=1, inplace=True)
Cust.info()
Cust.TENURE.unique()
#Handling Outliers - Method2
def outlier_capping(x):
x = x.clip(upper=x.quantile(0.99), lower=x.quantile(0.01))
return x
Cust=Cust.apply(lambda x: outlier_capping(x))
#Handling missings - Method2
def Missing_imputation(x):
x = x.fillna(x.median())
return x
Cust=Cust.apply(lambda x: Missing_imputation(x))
Cust.corr()
# visualize correlation matrix in Seaborn using a heatmap
sns.heatmap(Cust.corr())
```
### Standardrizing data
- To put data on the same scale
```
sc=StandardScaler()
Cust_scaled=sc.fit_transform(Cust)
pd.DataFrame(Cust_scaled).shape
```
### Applying PCA
```
pc = PCA(n_components=16)
pc.fit(Cust_scaled)
pc.explained_variance_
#Eigen values
sum(pc.explained_variance_)
#The amount of variance that each PC explains
var= pc.explained_variance_ratio_
var
#Cumulative Variance explains
var1=np.cumsum(np.round(pc.explained_variance_ratio_, decimals=4)*100)
var1
```
The number of components was chosen as 6 because the cumulative variance explained exceeds 75% and each individual component explains more than 0.8 of the variance.
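As a quick visual check (assuming the `var1` cumulative-variance array computed in the previous cell), we can plot the cumulative variance explained against the number of components:
```
# Cumulative variance plot to support choosing 6 components
plt.figure()
plt.plot(range(1, len(var1) + 1), var1, marker='o')
plt.axhline(75, color='r', linestyle='--', label='75% cumulative variance')
plt.axvline(6, color='g', linestyle='--', label='6 components')
plt.xlabel('Number of principal components')
plt.ylabel('Cumulative variance explained (%)')
plt.legend()
plt.show()
```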
```
pc_final=PCA(n_components=6).fit(Cust_scaled)
pc_final.explained_variance_
reduced_cr=pc_final.transform(Cust_scaled)
dimensions = pd.DataFrame(reduced_cr)
dimensions
dimensions.columns = ["C1", "C2", "C3", "C4", "C5", "C6"]
dimensions.head()
```
#### Factor Loading Matrix
Loadings = Eigenvectors * sqrt(Eigenvalues)
Loadings are the covariances/correlations between the original variables and the unit-scaled components.
```
Loadings = pd.DataFrame((pc_final.components_.T * np.sqrt(pc_final.explained_variance_)).T,columns=Cust.columns).T
Loadings.to_csv("Loadings.csv")
```
### Clustering
```
#selected the list variables from PCA based on factor loading matrics
list_var = ['PURCHASES_TRX','INSTALLMENTS_PURCHASES','PURCHASES_INSTALLMENTS_FREQUENCY','MINIMUM_PAYMENTS','BALANCE','CREDIT_LIMIT','CASH_ADVANCE','PRC_FULL_PAYMENT','ONEOFF_PURCHASES_FREQUENCY']
Cust_scaled1=pd.DataFrame(Cust_scaled, columns=Cust.columns)
Cust_scaled1.head(5)
Cust_scaled2=Cust_scaled1[list_var]
Cust_scaled2.head(5)
```
## Segmentation
```
km_3=KMeans(n_clusters=3,random_state=123)
km_3.fit(Cust_scaled2)
print(km_3.labels_)
km_3.cluster_centers_
km_4=KMeans(n_clusters=4,random_state=123).fit(Cust_scaled2)
#km_4.labels_
km_5=KMeans(n_clusters=5,random_state=123).fit(Cust_scaled2)
#km_5.labels_
km_6=KMeans(n_clusters=6,random_state=123).fit(Cust_scaled2)
#km_6.labels_
km_7=KMeans(n_clusters=7,random_state=123).fit(Cust_scaled2)
#km_7.labels_
km_8=KMeans(n_clusters=8,random_state=123).fit(Cust_scaled2)
#km_5.labels_
metrics.silhouette_score(Cust_scaled2, km_3.labels_)
# 5 clusters are better
# Conactenating labels found through Kmeans with data
# save the cluster labels and sort by cluster
Cust['cluster_3'] = km_3.labels_
Cust['cluster_4'] = km_4.labels_
Cust['cluster_5'] = km_5.labels_
Cust['cluster_6'] = km_6.labels_
Cust['cluster_7'] = km_7.labels_
Cust['cluster_8'] = km_8.labels_
Cust.head()
```
### Choosing number clusters using Silhouette Coefficient
```
# calculate SC for K=3
from sklearn import metrics
metrics.silhouette_score(Cust_scaled2, km_3.labels_)
# calculate SC for K=3 through K=12
k_range = range(3, 13)
scores = []
for k in k_range:
km = KMeans(n_clusters=k, random_state=123)
km.fit(Cust_scaled2)
scores.append(metrics.silhouette_score(Cust_scaled2, km.labels_))
scores
# plot the results
plt.plot(k_range, scores)
plt.xlabel('Number of clusters')
plt.ylabel('Silhouette Coefficient')
plt.grid(True)
```
### Segment Distribution
```
Cust.cluster_3.value_counts()*100/sum(Cust.cluster_3.value_counts())
pd.Series.sort_index(Cust.cluster_3.value_counts())
```
### Profiling
```
size=pd.concat([pd.Series(Cust.cluster_3.size), pd.Series.sort_index(Cust.cluster_3.value_counts()), pd.Series.sort_index(Cust.cluster_4.value_counts()),
pd.Series.sort_index(Cust.cluster_5.value_counts()), pd.Series.sort_index(Cust.cluster_6.value_counts()),
pd.Series.sort_index(Cust.cluster_7.value_counts()), pd.Series.sort_index(Cust.cluster_8.value_counts())])
size
Seg_size=pd.DataFrame(size, columns=['Seg_size'])
Seg_Pct = pd.DataFrame(size/Cust.cluster_3.size, columns=['Seg_Pct'])
Seg_size.T
Seg_Pct.T
pd.concat([Seg_size.T, Seg_Pct.T], axis=0)
Cust.head()
# Mean value gives a good indication of the distribution of data. So we are finding mean value for each variable for each cluster
Profling_output = pd.concat([Cust.apply(lambda x: x.mean()).T, Cust.groupby('cluster_3').apply(lambda x: x.mean()).T, Cust.groupby('cluster_4').apply(lambda x: x.mean()).T,
Cust.groupby('cluster_5').apply(lambda x: x.mean()).T, Cust.groupby('cluster_6').apply(lambda x: x.mean()).T,
Cust.groupby('cluster_7').apply(lambda x: x.mean()).T, Cust.groupby('cluster_8').apply(lambda x: x.mean()).T], axis=1)
Profling_output
Profling_output_final=pd.concat([Seg_size.T, Seg_Pct.T, Profling_output], axis=0)
Profling_output_final
#Profling_output_final.columns = ['Seg_' + str(i) for i in Profling_output_final.columns]
Profling_output_final.columns = ['Overall', 'KM3_1', 'KM3_2', 'KM3_3',
'KM4_1', 'KM4_2', 'KM4_3', 'KM4_4',
'KM5_1', 'KM5_2', 'KM5_3', 'KM5_4', 'KM5_5',
'KM6_1', 'KM6_2', 'KM6_3', 'KM6_4', 'KM6_5','KM6_6',
'KM7_1', 'KM7_2', 'KM7_3', 'KM7_4', 'KM7_5','KM7_6','KM7_7',
'KM8_1', 'KM8_2', 'KM8_3', 'KM8_4', 'KM8_5','KM8_6','KM8_7','KM8_8']
Profling_output_final
Profling_output_final.to_csv('Profiling_output.csv')
```
### Check profiling Output for more details.
Submitted By, Pranjal Saxena <a>https://www.linkedin.com/in/pranjalai/ </a> <br>
pranjal.saxena2012@gmail.com
| github_jupyter |
## Data and Training
The **augmented** cough audio dataset of the [Project Coswara](https://coswara.iisc.ac.in/about) was used to train the deep CNN model.
The preprocessing steps and CNN architecture are shown below. The training code is concealed on GitHub to protect the exact hyperparameters and maintain the performance integrity of the model.
<img src = "../assets/ml-pipeline.png" alt="../assets/ml-pipeline.png" width="800"/>
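For readers who want a rough feel for this kind of pipeline, below is a purely illustrative Keras sketch of a small CNN over fixed-size mel-spectrogram inputs. It is **not** the concealed cough-it model; the input shape, layer sizes, and hyperparameters are assumptions made only for illustration.
```
import tensorflow as tf
from tensorflow.keras import layers, models

# Illustrative only -- not the concealed model or its hyperparameters.
# Assumes cough audio has been converted to fixed-size mel-spectrograms, e.g. (128, 128, 1).
def build_demo_cnn(input_shape=(128, 128, 1)):
    model = models.Sequential([
        layers.Conv2D(16, 3, activation='relu', input_shape=input_shape),
        layers.MaxPooling2D(),
        layers.Conv2D(32, 3, activation='relu'),
        layers.MaxPooling2D(),
        layers.Flatten(),
        layers.Dense(64, activation='relu'),
        layers.Dropout(0.5),
        layers.Dense(1, activation='sigmoid')  # binary cough classification
    ])
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model
```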
## Model Deployment on IBM Watson Machine Learning
Below are the contents of an IBM Watson Studio notebook for deploying our trained ML model to IBM Watson Machine Learning.
Outputs, Keys, Endpoints and URLs are removed (replaced with <>) to maintain privacy.
### Import model
```
import ibm_boto3
from ibm_botocore.client import Config
# @hidden_cell
# The following code contains the credentials for a file in your IBM Cloud Object Storage.
# You might want to remove those credentials before you share your notebook.
credentials_2 = {
'IAM_SERVICE_ID': <>,
'IBM_API_KEY_ID': <>,
'ENDPOINT': <>,
'IBM_AUTH_ENDPOINT': <>,
'BUCKET': <>,
'FILE': 'cough-it-model.tgz'
}
cos = ibm_boto3.client(service_name='s3',
ibm_api_key_id=credentials_2['IBM_API_KEY_ID'],
ibm_auth_endpoint=credentials_2['IBM_AUTH_ENDPOINT'],
ibm_service_instance_id=credentials_2['IAM_SERVICE_ID'],
config=Config(signature_version='oauth'),
endpoint_url=credentials_2['ENDPOINT'])
cos.download_file(Bucket=credentials_2['BUCKET'], Key='cough-it-model.h5.tgz', Filename='cough-it-model.h5.tgz')
model_path = 'cough-it-model.h5.tgz'
```
### Set up Watson Machine Learning Client and Deployment space
```
from ibm_watson_machine_learning import APIClient
wml_credentials = {
"apikey" : <>,
"url" : <>
}
client = APIClient( wml_credentials )
space_guid = <>
client.set.default_space(space_guid)
```
### Store the model
```
sofware_spec_uid = client.software_specifications.get_id_by_name("default_py3.8")
metadata = {
client.repository.ModelMetaNames.NAME: "cough-it model",
client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: sofware_spec_uid,
client.repository.ModelMetaNames.TYPE: "tensorflow_2.4"
}
published_model = client.repository.store_model( model= model_path, meta_props=metadata )
import json
published_model_uid = client.repository.get_model_uid(published_model)
model_details = client.repository.get_details(published_model_uid)
print(json.dumps(model_details, indent=2))
```
### Create a deployment
```
dep_metadata = {
client.deployments.ConfigurationMetaNames.NAME: "Deployment of external Keras model",
client.deployments.ConfigurationMetaNames.ONLINE: {}
}
created_deployment = client.deployments.create(published_model_uid, meta_props=dep_metadata)
deployment_uid = client.deployments.get_uid(created_deployment)
client.deployments.get_details(deployment_uid)
```
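Once deployed, the model can be scored with the same client. Below is a rough sketch of how an online scoring call might look; the payload contents (a batch of preprocessed audio features, represented here by a hypothetical `features` array) are assumptions and are not taken from the original notebook.
```
# Sketch only: scoring the online deployment with the WML client.
# `features` stands in for whatever preprocessed input the model expects.
scoring_payload = {
    client.deployments.ScoringMetaNames.INPUT_DATA: [
        {"values": features.tolist()}
    ]
}
predictions = client.deployments.score(deployment_uid, scoring_payload)
print(predictions)
```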
| github_jupyter |
# Scraping and Parsing: EAD XML Finding Aids from the Library of Congress
```
import os
from urllib.request import urlopen
from bs4 import BeautifulSoup
import subprocess
## Creating a directory called 'LOC_Metadata' and setting it as our current working directory
!mkdir /sharedfolder/LOC_Metadata
os.chdir('/sharedfolder/LOC_Metadata')
## To make this notebook self-contained, we'll download a list of XML finding aid files the 'right' way.
## (In practice I normally use the 'find-and-replace + grep + wget' approach we covered in class,
## because it takes some extra effort to remind myself how to parse the HTML page via BeautifulSoup.)
## We first load a page with links to finding aids in the 'recorded sound' collection.
finding_aid_list_url = 'http://findingaids.loc.gov/source/RS'
finding_aid_list_page = urlopen(finding_aid_list_url).read().decode('utf8') # Loading the page
print(finding_aid_list_page[:700]) # Printing the first 700 characters in the page we just loaded
## Now we'll parse the page's HTML using BeautifulSoup ...
soup = BeautifulSoup(finding_aid_list_page, 'lxml')
## ... and examine soup.find_all('a'), which returns a list of 'a' elements (i.e., HTML links).
print(len(soup.find_all('a'))) # Checking the number of links on the page
print() # Printing a blank line for readability
print(soup.find_all('a')[70]) # Printing element #70 in the list
## We can access the 'href' attribute of an element (i.e., the link URL) using 'href' in
## brackets, just like a dictionary.
soup.find_all('a')[70]['href']
## Now let's make a list of every link on the page.
all_links = []
for element in soup.find_all('a'): # Looping through all 'a' elements.
try: # Because some 'a' elements do not contain 'href' attributes,
all_links.append(element['href']) ## we can use a try/except statement to skip elements that
except: ## would otherwise raise an error.
pass
all_links[:15] # Outputting the first 15 links in the list
## We know that the URL for every XML file we're looking for ends in '.2', so we can
## use that fact to filter out irrelevant links.
xml_urls = []
for link in all_links:
if link[-2:] == '.2': # Checking whether the last two characters of a link are '.2'
xml_urls.append(link)
xml_urls # Outputting the full list of relevant XML URLs
## Downloading each XML file in our list of URLs
## We can use the subprocess module (which we imported above) to issue commands in the bash shell.
## In an interactive bash shell session we'd use spaces to separate arguments; instead, subprocess
## takes arguments in the form of a Python list.
## For each item in our list, the following issues a command with two arguments: 'wget' followed by the URL.
## It thus downloads each XML file to the current directory.
for url in xml_urls:
subprocess.call(['wget', url])
## Outputting a list of filenames in the current directory
## In Unix-like operating systems, './' always refers to the current directory.
os.listdir('./')
## Just in case there are other files in the current directory, we can use a
## list comprehension to create a list of filenames that end in '.2' and assign
## it to the variable 'xml_filenames'.
xml_filenames = [item for item in os.listdir('./') if item[-2:]=='.2']
xml_filenames
## Now let's choose an arbitrary XML file in our collection so we can figure out how to parse it.
xml_filename = xml_filenames[4] ## Selecting filename #4 in our list
xml_text = open(xml_filename).read() ## Reading the file and assigning its content to the variable 'xml_text'
print(xml_text[:700]) ## Printing the first 700 characters in the XML text we just loaded
## Parse the XML text from the previous cell using Beautiful Soup
soup = BeautifulSoup(xml_text, 'lxml')
## By looking at the XML text above, we can see that the 'ead' element is the root of our XML tree.
## Let's use a for loop to look at the names of elements one next level down in the tree.
for element in soup.ead:
print(element.name)
## In practice you'd usually just look through the XML file by eye, identify the elements
## you're looking for, and use soup.find_all('...') to extract them. For now, let's continue
## working down the XML tree with BeautifulSoup.
# You can find a glossary of EAD element names here:
# https://loc.gov/ead/EAD3taglib/index.html
## Since the 'eadheader' element is administrative metadata we don't care about, let's
## repeat the process for 'soup.ead.archdesc' ('archdesc' is 'archival description' in EAD parlance).
for element in soup.ead.archdesc:
if element.name != None: ## Filtering out 'None' elements, which in this case are irrelevant comments
print(element.name)
## By looking at the XML file in a text editor, I notice the 'did' element ('descriptive identification')
## contains the item-level information we're looking for. Let's run another for loop to look at the
## names of elements contained within each 'did' element.
for element in soup.ead.archdesc.did:
if element.name != None:
print(element.name)
## Note that 'soup.ead.archdesc.did' only refers to the first 'did' element in the XML document.
## OK, that's enough exploring. Let's use soup.find_all() to create a list of 'did' elements.
did_elements = soup.find_all('did')
print(len(did_elements)) ## Printing the number of 'did' elements in our list
print()
print(did_elements[4]) ## Printing item #4 in the the list
## Not every 'did' element contains the same fields; different objects are described differently.
## Try running this cell several times, plugging in other index numbers to compare the way
## different items' records are formatted.
print(did_elements[7])
## If you run the cell above several times with different index numbers, you'll notice that the
## first item in the list (index 0) refers to the entire box of records, while the others are
## individual folders or series of folders.
## To make things more complicated, some items are physically described using 'container' elements
## while others use 'extent' instead. Most appear to include 'unittitle' and 'unitdate'.
## Our goal is to create a CSV that contains a basic description of each 'unit', or 'did' element,
## in each XML finding aid. For the purposes of this exercise, let's include the following pieces
## of information for each unit, where available:
#### title of the source collection
#### unittitle
#### unitdate
#### container type
#### container number
#### extent
## Since each XML finding aid represents a single collection, we'll want to include a column that
## identifies which collection it comes from. By reading through the XML files, we see that each
## has a single element called 'titleproper' that describes the whole collection.
## Let's create a recipe to extract that text. Here's a first try:
collection_title = soup.find('titleproper').get_text()
collection_title
## That format is OK, but we should remove the tab and newline characters. Let's try again, using
## the replace() function to replace them with spaces.
collection_title = soup.find('titleproper').get_text().replace('\t', ' ').replace('\n', ' ')
collection_title
## We can add the strip() function to remove the space at the end of the string.
collection_title = soup.find('titleproper').get_text().replace('\t', ' ').replace('\n', ' ').strip()
collection_title
## We still have a series of spaces in a row in the middle of the string. We can use a 'while loop'
## to repeatedly replace any occurrence of ' ' (two spaces) with ' ' (one space).
collection_title = soup.find('titleproper').get_text().replace('\t', ' ').replace('\n', ' ').strip()
while ' ' in collection_title:
collection_title = collection_title.replace(' ', ' ')
collection_title
## Perfect. We'll extract the collection name whenever we open an XML finding aid and include it
## in each CSV row associated with that collection.
## Now on to 'unittitle'. Recall that we created a list of 'did' elements above, called 'did_elements'.
element = did_elements[4]
unittitle = element.find('unittitle').get_text()
unittitle
## Since those tabs and newlines are a recurring problem, we should define a function that
## removes them from any given text string.
def clean_text(text):
temp_text = text.replace('\t', ' ').replace('\n', ' ').strip()
while ' ' in temp_text:
temp_text = temp_text.replace(' ', ' ')
return temp_text
# Let's test our clean_text() function.
element = did_elements[4]
unittitle = element.find('unittitle').get_text()
unittitle = clean_text(unittitle)
unittitle
## Now let's try extracting the 'unittitle' field for each 'did' element in our list.
for element in did_elements:
unittitle = element.get_text().replace('\t', ' ').replace('\n', ' ').strip()
print(clean_text(unittitle))
print('-----------------') # Printing a divider between elements
## The first element in the list above contains more information than we need, but we can
## let that slide for this exercise.
## Next is 'unitdate'. We'll use our clean_text() function once again.
element = did_elements[4]
unitdate = element.find('unitdate').get_text()
unitdate = clean_text(unitdate)
unitdate
## Let's loop through the list of 'did' elements and see if our 'unitdate' recipe holds up.
for element in did_elements:
unitdate = element.find('unitdate').get_text()
print(clean_text(unitdate))
print('-----------------') # Printing a divider between elements
## Now on to container type and number. Let's examine a 'container' XML element.
element = did_elements[4]
element.find('container')
## Since the container type ('folder', in this case) is an attribute in the 'container' tag,
## we can extract it using bracket notation.
element = did_elements[4]
container_type = element.find('container')['type']
container_type
## The container number is specified between the opening and closing 'container' tags,
## so we can get it using get_text().
element = did_elements[4]
container_number = element.find('container').get_text()
container_number
## Next we'll try to get the container type and number for each 'did' element in our list ...
for element in did_elements:
container_type = element.find('container')['type']
print(container_type)
container_number = element.find('container').get_text()
print(container_number)
print('-----------------') # Printing a divider between elements
## ... and we get an error. The reason is that some 'did' elements don't include a 'container' field.
## Using try/except notation, whenever we get an error because a container element isn't found,
## we can revert to '' (an empty string) instead.
for element in did_elements:
try:
container_type = element.find('container')['type']
except:
container_type = ''
print(container_type)
try:
container_number = element.find('container').get_text()
except:
container_number = ''
print(container_number)
print('-----------------') # Printing a divider between elements
## The last field we'll extract is 'extent', which is only included in a handful of 'did' elements.
element = did_elements[3]
extent = element.find('extent').get_text()
extent
## Let's extract 'extent' from each element in our list of 'did' elements (for those that happen to include it).
for element in did_elements:
try:
extent = element.find('extent').get_text()
except:
extent = ''
print(extent)
print('-----------------') # Printing a divider between elements
## Let's put it all together and view our chosen fields for a single 'did' element.
## We will combine our fields in a list to create a 'row' for our future CSV file.
element = did_elements[6]
# unittitle
try: # Added try/except statements for 'unittitle' and 'unitdate' just to be safe
unittitle = clean_text(element.find('unittitle').get_text())
except:
unittitle = ''
# unitdate
try:
unitdate = clean_text(element.find('unitdate').get_text())
except:
unitdate = ''
# container type and number
try:
container_type = element.find('container')['type']
except:
container_type = ''
try:
container_number = element.find('container').get_text()
except:
container_number = ''
# extent
try:
extent = element.find('extent').get_text()
except:
extent = ''
row = [unittitle, unitdate, container_type, container_number, extent]
print(row)
## Let's take a step back and generalize, so that we can extract metadata for each
## 'did' element in a single XML file.
## We will also include the 'collection title' field ('titleproper' in EAD's vocabulary) as
## the first item in each row.
xml_filename = xml_filenames[3] # <-- Change the index number there to run the script on another XML file in the list.
xml_text = open(xml_filename).read()
soup = BeautifulSoup(xml_text, 'lxml')
list_of_lists = [] # Creating an empty list, which we will use to hold our rows (each row represented as a list)
try:
collection_title = clean_text(soup.find('titleproper').get_text())
except:
collection_title = xml_filename # If the 'titleproper' field is missing for some reason,
## we'll use the XML filename instead.
for element in soup.find_all('did'):
# unittitle
try:
unittitle = clean_text(element.find('unittitle').get_text())
except:
unittitle = ''
# unitdate
try:
unitdate = clean_text(element.find('unitdate').get_text())
except:
unitdate = ''
# container type and number
try:
container_type = element.find('container')['type']
except:
container_type = ''
try:
container_number = element.find('container').get_text()
except:
container_number = ''
# extent
try:
extent = element.find('extent').get_text()
except:
extent = ''
row = [collection_title, unittitle, unitdate, container_type, container_number, extent]
list_of_lists.append(row) ## Adding the row list we defined in the previous line to 'list_of_lists'
list_of_lists[:15] ## Outputting the first 15 rows in our list of lists
## Almost there! Next we'll run the script above on each XML file in our list, creating a
## master list of lists that we'll write to disk as a CSV in the next cell.
## Let's begin by re-loading our list of XML filenames:
os.chdir('/sharedfolder/LOC_Metadata')
xml_filenames = [item for item in os.listdir('./') if item[-2:]=='.2'] # Creating a list of XML filenames
list_of_lists = [] # Creating an empty list
## Now we'll extract metadata from the full batch of XML files. This may take a few moments to complete.
for xml_filename in xml_filenames:
xml_text = open(xml_filename).read()
soup = BeautifulSoup(xml_text, 'lxml')
try:
collection_title = clean_text(soup.find('titleproper').get_text())
except:
collection_title = xml_filename # If the 'titleproper' field is missing for some reason,
## we'll use the XML filename instead.
for element in soup.find_all('did'):
# unittitle
try:
unittitle = clean_text(element.find('unittitle').get_text())
except:
unittitle = ''
# unitdate
try:
unitdate = clean_text(element.find('unitdate').get_text())
except:
unitdate = ''
# container type and number
try:
container_type = element.find('container')['type']
except:
container_type = ''
try:
container_number = element.find('container').get_text()
except:
container_number = ''
# extent
try:
extent = element.find('extent').get_text()
except:
extent = ''
row = [collection_title, unittitle, unitdate, container_type, container_number, extent]
list_of_lists.append(row)
print(len(list_of_lists)) ## Printing the number of rows in our table
## Finally, we write the extracted metadata to disk as a CSV called 'LOC_RS_Reduced_Metadata.csv'
out_path = "./LOC_RS_Reduced_Metadata.csv" # The './' part is optional; it just means we're writing to
# the current working directory.
# Defining a list of column headers, which we will write as the first row in our CSV
column_headers = ['Collection Title', 'Unit Title', 'Unit Date', 'Container Type', 'Container Number', 'Extent']
import csv # Importing Python's built-in CSV input/output package
with open(out_path, 'w') as fo: # Creating a temporary file stream object called 'fo' (my abbreviation for 'file out')
csv_writer = csv.writer(fo) # Initializing our CSV writer
csv_writer.writerow(column_headers) # Writing one row (our column headers)
csv_writer.writerows(list_of_lists) # Writing a list of lists as a sequence of rows
## Go to 'sharedfolder' on your desktop and use LibreOffice or Excel to open your new CSV.
## As you scroll through the CSV file, you will probably see more formatting oddities you can fix
## by tweaking the code above.
```
| github_jupyter |
## Dependencies
```
import warnings, glob
from tensorflow.keras import Sequential, Model
from cassava_scripts import *
seed = 0
seed_everything(seed)
warnings.filterwarnings('ignore')
```
### Hardware configuration
```
# TPU or GPU detection
# Detect hardware, return appropriate distribution strategy
strategy, tpu = set_up_strategy()
AUTO = tf.data.experimental.AUTOTUNE
REPLICAS = strategy.num_replicas_in_sync
print(f'REPLICAS: {REPLICAS}')
```
# Model parameters
```
BATCH_SIZE = 8 * REPLICAS
HEIGHT = 380
WIDTH = 380
CHANNELS = 3
N_CLASSES = 5
TTA_STEPS = 0 # Do TTA if > 0
```
# Augmentation
```
def data_augment(image, label):
p_spatial = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
# Flips
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
if p_spatial > .75:
image = tf.image.transpose(image)
return image, label
```
## Auxiliary functions
```
# Datasets utility functions
def resize_image(image, label):
image = tf.image.resize(image, [HEIGHT, WIDTH])
image = tf.reshape(image, [HEIGHT, WIDTH, CHANNELS])
return image, label
def process_path(file_path):
name = get_name(file_path)
img = tf.io.read_file(file_path)
img = decode_image(img)
# img, _ = scale_image(img, None)
# img = center_crop(img, HEIGHT, WIDTH)
return img, name
def get_dataset(files_path, shuffled=False, tta=False, extension='jpg'):
dataset = tf.data.Dataset.list_files(f'{files_path}*{extension}', shuffle=shuffled)
dataset = dataset.map(process_path, num_parallel_calls=AUTO)
if tta:
dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
dataset = dataset.map(resize_image, num_parallel_calls=AUTO)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
```
# Load data
```
database_base_path = '/kaggle/input/cassava-leaf-disease-classification/'
submission = pd.read_csv(f'{database_base_path}sample_submission.csv')
display(submission.head())
TEST_FILENAMES = tf.io.gfile.glob(f'{database_base_path}test_tfrecords/ld_test*.tfrec')
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
print(f'GCS: test: {NUM_TEST_IMAGES}')
!ls /kaggle/input/
model_path_list = glob.glob('/kaggle/input/162-cassava-leaf-effnetb4-dcr-04-380x380/*.h5')
model_path_list.sort()
print('Models to predict:')
print(*model_path_list, sep='\n')
```
# Model
```
def model_fn(input_shape, N_CLASSES):
inputs = L.Input(shape=input_shape, name='input_image')
base_model = tf.keras.applications.EfficientNetB4(input_tensor=inputs,
include_top=False,
drop_connect_rate=.4,
weights=None)
x = L.GlobalAveragePooling2D()(base_model.output)
x = L.Dropout(.5)(x)
output = L.Dense(N_CLASSES, activation='softmax', name='output')(x)
model = Model(inputs=inputs, outputs=output)
return model
with strategy.scope():
model = model_fn((None, None, CHANNELS), N_CLASSES)
model.summary()
```
# Test set predictions
```
files_path = f'{database_base_path}test_images/'
test_size = len(os.listdir(files_path))
test_preds = np.zeros((test_size, N_CLASSES))
for model_path in model_path_list:
print(model_path)
K.clear_session()
model.load_weights(model_path)
if TTA_STEPS > 0:
test_ds = get_dataset(files_path, tta=True).repeat()
ct_steps = TTA_STEPS * ((test_size/BATCH_SIZE) + 1)
preds = model.predict(test_ds, steps=ct_steps, verbose=1)[:(test_size * TTA_STEPS)]
preds = np.mean(preds.reshape(test_size, TTA_STEPS, N_CLASSES, order='F'), axis=1)
test_preds += preds / len(model_path_list)
else:
test_ds = get_dataset(files_path, tta=False)
x_test = test_ds.map(lambda image, image_name: image)
test_preds += model.predict(x_test) / len(model_path_list)
test_preds = np.argmax(test_preds, axis=-1)
test_names_ds = get_dataset(files_path)
image_names = [img_name.numpy().decode('utf-8') for img, img_name in iter(test_names_ds.unbatch())]
submission = pd.DataFrame({'image_id': image_names, 'label': test_preds})
submission.to_csv('submission.csv', index=False)
display(submission.head())
```
| github_jupyter |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
# Training Pipeline - Custom Script
_**Training many models using a custom script**_
----
This notebook demonstrates how to create a pipeline that trains and registers many models using a custom script. We utilize the [ParallelRunStep](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-use-parallel-run-step) to parallelize model training and make the process more efficient. For this solution accelerator we are using the [OJ Sales Dataset](https://azure.microsoft.com/en-us/services/open-datasets/catalog/sample-oj-sales-simulated/) to train individual models that predict sales for each store and brand of orange juice.
The model we use here is a simple, regression-based forecaster built on scikit-learn and pandas utilities. See the [training script](scripts/train.py) to see how the forecaster is constructed. This forecaster is intended for demonstration purposes, so it does not handle the large variety of special cases that one encounters in time-series modeling. For instance, the model here assumes that all time-series are comprised of regularly sampled observations on a contiguous interval with no missing values. The model does not include any handling of categorical variables. For a more general-use forecaster that handles missing data, advanced featurization, and automatic model selection, see the [AutoML Forecasting task](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-forecast). Also, see the notebooks demonstrating [AutoML forecasting in a many models scenario](../Automated_ML).
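To give a sense of what such a forecaster can look like, here is a minimal sketch of a lag-feature regression forecaster built with pandas and scikit-learn. It is only an illustration of the general approach; the actual model used by the pipeline is defined in the [training script](scripts/train.py).
```
import pandas as pd
from sklearn.linear_model import LinearRegression

# Minimal sketch of a lag-feature regression forecaster (illustrative only).
# Assumes a regularly sampled series with no missing values, as noted above.
def fit_lag_forecaster(series: pd.Series, n_lags: int = 4):
    df = pd.concat({f'lag_{i}': series.shift(i) for i in range(1, n_lags + 1)}, axis=1)
    df['y'] = series
    df = df.dropna()
    model = LinearRegression().fit(df.drop(columns='y'), df['y'])
    return model

def forecast_next(model, series: pd.Series, n_lags: int = 4):
    # Most recent observation becomes lag_1, the one before it lag_2, and so on.
    last_lags = series.iloc[-n_lags:][::-1].to_numpy().reshape(1, -1)
    return float(model.predict(last_lags)[0])
```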
### Prerequisites
At this point, you should have already:
1. Created your AML Workspace using the [00_Setup_AML_Workspace notebook](../00_Setup_AML_Workspace.ipynb)
2. Run [01_Data_Preparation.ipynb](../01_Data_Preparation.ipynb) to setup your compute and create the dataset
#### Please ensure you have the latest version of the Azure ML SDK and also install the Pipeline Steps package
```
#!pip install --upgrade azureml-sdk
# !pip install azureml-pipeline-steps
```
## 1.0 Connect to workspace and datastore
```
from azureml.core import Workspace
# set up workspace
ws = Workspace.from_config()
# set up datastores
dstore = ws.get_default_datastore()
print('Workspace Name: ' + ws.name,
'Azure Region: ' + ws.location,
'Subscription Id: ' + ws.subscription_id,
'Resource Group: ' + ws.resource_group,
sep = '\n')
```
## 2.0 Create an experiment
```
from azureml.core import Experiment
experiment = Experiment(ws, 'oj_training_pipeline')
print('Experiment name: ' + experiment.name)
```
## 3.0 Get the training Dataset
Next, we get the training Dataset using the [Dataset.get_by_name()](https://docs.microsoft.com/python/api/azureml-core/azureml.core.dataset.dataset#get-by-name-workspace--name--version--latest--) method.
This is the training dataset we created and registered in the [data preparation notebook](../01_Data_Preparation.ipynb). If you chose to use only a subset of the files, the training dataset name will be `oj_data_small_train`. Otherwise, the name you'll have to use is `oj_data_train`.
We recommend starting with the small dataset to make sure everything runs successfully, then scaling up to the full dataset.
```
dataset_name = 'oj_data_small_train'
from azureml.core.dataset import Dataset
dataset = Dataset.get_by_name(ws, name=dataset_name)
dataset_input = dataset.as_named_input(dataset_name)
```
## 4.0 Create the training pipeline
Now that the workspace, experiment, and dataset are set up, we can put together a pipeline for training.
### 4.1 Configure environment for ParallelRunStep
An [environment](https://docs.microsoft.com/en-us/azure/machine-learning/concept-environments) defines a collection of resources that we will need to run our pipelines. We configure a reproducible Python environment for our training script including the [scikit-learn](https://scikit-learn.org/stable/index.html) python library.
```
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies
train_env = Environment(name="many_models_environment")
train_conda_deps = CondaDependencies.create(pip_packages=['sklearn', 'pandas', 'joblib', 'azureml-defaults', 'azureml-core', 'azureml-dataprep[fuse]'])
train_env.python.conda_dependencies = train_conda_deps
```
### 4.2 Choose a compute target
Currently ParallelRunConfig only supports AMLCompute. This is the compute cluster you created in the [setup notebook](../00_Setup_AML_Workspace.ipynb#3.0-Create-compute-cluster).
```
cpu_cluster_name = "cpucluster"
from azureml.core.compute import AmlCompute
compute = AmlCompute(ws, cpu_cluster_name)
```
### 4.3 Set up ParallelRunConfig
[ParallelRunConfig](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.parallel_run_config.parallelrunconfig?view=azure-ml-py) provides the configuration for the ParallelRunStep we'll be creating next. Here we specify the environment and compute target we created above along with the entry script that will be run for each batch.
There are a number of important parameters to configure, including:
- **mini_batch_size**: The number of files per batch. If you have 500 files and mini_batch_size is 10, 50 batches would be created containing 10 files each. Batches are split across the various nodes.
- **node_count**: The number of compute nodes to be used for running the user script. For the small sample of OJ datasets, we only need a single node, but you will likely need to increase this number for larger datasets composed of more files. If you increase the node count beyond five here, you may need to increase the max_nodes for the compute cluster as well.
- **process_count_per_node**: The number of processes per node. The compute cluster we are using has 8 cores so we set this parameter to 8.
- **run_invocation_timeout**: The timeout for each run() method invocation, in seconds. It should be set higher than the maximum training time of a single model (the default is 60 seconds). Since the batches that take the longest to train finish in about 120 seconds, we set it to 180 to ensure the method has adequate time to run.
We also added tags to preserve the information about our training cluster's node count, process count per node, and dataset name. You can find the 'Tags' column in Azure Machine Learning Studio.
```
from azureml.pipeline.steps import ParallelRunConfig
processes_per_node = 8
node_count = 1
timeout = 180
parallel_run_config = ParallelRunConfig(
source_directory='./scripts',
entry_script='train.py',
mini_batch_size="1",
run_invocation_timeout=timeout,
error_threshold=-1,
output_action="append_row",
environment=train_env,
process_count_per_node=processes_per_node,
compute_target=compute,
node_count=node_count)
```
### 4.4 Set up ParallelRunStep
This [ParallelRunStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.parallel_run_step.parallelrunstep?view=azure-ml-py) is the main step in our training pipeline.
First, we set up the output directory and define the pipeline's output name. The datastore that stores the pipeline's output data is the workspace's default datastore.
```
from azureml.pipeline.core import PipelineData
output_dir = PipelineData(name="training_output", datastore=dstore)
```
We provide our ParallelRunStep with a name, the ParallelRunConfig created above and several other parameters:
- **inputs**: A list of input datasets. Here we'll use the dataset created in the previous notebook. The number of files in that path determines the number of models that will be trained in the ParallelRunStep.
- **output**: A PipelineData object that corresponds to the output directory. We'll use the output directory we just defined.
- **arguments**: A list of arguments required for the train.py entry script. Here, we provide the schema for the timeseries data - i.e. the names of target, timestamp, and id columns - as well as columns that should be dropped prior to modeling, a string identifying the model type, and the number of observations we want to leave aside for testing.
```
from azureml.pipeline.steps import ParallelRunStep
parallel_run_step = ParallelRunStep(
name="many-models-training",
parallel_run_config=parallel_run_config,
inputs=[dataset_input],
output=output_dir,
allow_reuse=False,
arguments=['--target_column', 'Quantity',
'--timestamp_column', 'WeekStarting',
'--timeseries_id_columns', 'Store', 'Brand',
'--drop_columns', 'Revenue', 'Store', 'Brand',
'--model_type', 'lr',
'--test_size', 20]
)
```
## 5.0 Run the pipeline
Next, we submit our pipeline to run. The run will train models for each dataset using a train set, compute accuracy metrics for the fits using a test set, and finally re-train models with all the data available. With 10 files, this should only take a few minutes but with the full dataset this can take over an hour.
```
from azureml.pipeline.core import Pipeline
pipeline = Pipeline(workspace=ws, steps=[parallel_run_step])
run = experiment.submit(pipeline)
#Wait for the run to complete
run.wait_for_completion(show_output=False, raise_on_error=True)
```
## 6.0 View results of training pipeline
The dataframe returned by the run() method of train.py is written to *parallel_run_step.txt*. To see the results of our training pipeline, we'll download that file, read the data into a DataFrame, and then visualize the results, including the accuracy metrics.
The run submitted to the Azure Machine Learning compute cluster may take a while. The output is not generated until the run is complete. You can monitor the status of the run in Azure Machine Learning studio at https://ml.azure.com
### 6.1 Download parallel_run_step.txt locally
```
import os
def download_results(run, target_dir=None, step_name='many-models-training', output_name='training_output'):
stitch_run = run.find_step_run(step_name)[0]
port_data = stitch_run.get_output_data(output_name)
port_data.download(target_dir, show_progress=True)
return os.path.join(target_dir, 'azureml', stitch_run.id, output_name)
file_path = download_results(run, 'output')
file_path
```
### 6.2 Convert the file to a dataframe
```
import pandas as pd
df = pd.read_csv(file_path + '/parallel_run_step.txt', sep=" ", header=None)
df.columns = ['Store', 'Brand', 'Model', 'File Name', 'ModelName', 'StartTime', 'EndTime', 'Duration',
'MSE', 'RMSE', 'MAE', 'MAPE', 'Index', 'Number of Models', 'Status']
df['StartTime'] = pd.to_datetime(df['StartTime'])
df['EndTime'] = pd.to_datetime(df['EndTime'])
df['Duration'] = df['EndTime'] - df['StartTime']
df.head()
```
### 6.3 Review Results
```
total = df['EndTime'].max() - df['StartTime'].min()
print('Number of Models: ' + str(len(df)))
print('Total Duration: ' + str(total)[6:])
print('Average MAPE: ' + str(round(df['MAPE'].mean(), 5)))
print('Average MSE: ' + str(round(df['MSE'].mean(), 5)))
print('Average RMSE: ' + str(round(df['RMSE'].mean(), 5)))
print('Average MAE: '+ str(round(df['MAE'].mean(), 5)))
print('Maximum Duration: '+ str(df['Duration'].max())[7:])
print('Minimum Duration: ' + str(df['Duration'].min())[7:])
print('Average Duration: ' + str(df['Duration'].mean())[7:])
```
### 6.4 Visualize Performance across models
Here, we produce some charts from the error metrics calculated during the run on the subset put aside for testing.
First, we examine the distribution of mean absolute percentage error (MAPE) over all the models:
```
import seaborn as sns
import matplotlib.pyplot as plt
fig = sns.boxplot(y='MAPE', data=df)
fig.set_title('MAPE across all models')
```
Next, we can break that down by Brand or Store to see variations in error across our models.
```
fig = sns.boxplot(x='Brand', y='MAPE', data=df)
fig.set_title('MAPE by Brand')
```
We can also look at how long the models for different brands took to train.
```
brand = df.groupby('Brand')
brand = brand['Duration'].sum()
brand = pd.DataFrame(brand)
brand['time_in_seconds'] = [time.total_seconds() for time in brand['Duration']]
brand.drop(columns=['Duration']).plot(kind='bar')
plt.xlabel('Brand')
plt.ylabel('Seconds')
plt.title('Total Training Time by Brand')
plt.show()
```
## 7.0 Publish and schedule the pipeline (Optional)
### 7.1 Publish the pipeline
Once you have a pipeline you're happy with, you can publish it so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipeline#publish-a-pipeline) for additional information on publishing and calling pipelines.
```
# published_pipeline = pipeline.publish(name = 'train_many_models',
# description = 'train many models',
# version = '1',
# continue_on_step_failure = False)
```
### 7.2 Schedule the pipeline
You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift.
```
# from azureml.pipeline.core import Schedule, ScheduleRecurrence
# training_pipeline_id = published_pipeline.id
# recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00")
# recurring_schedule = Schedule.create(ws, name="training_pipeline_recurring_schedule",
# description="Schedule Training Pipeline to run on the first day of every month",
# pipeline_id=training_pipeline_id,
# experiment_name=experiment.name,
# recurrence=recurrence)
```
## Next Steps
Now that you've trained and scored the models, move on to [03_CustomScript_Forecasting_Pipeline.ipynb](03_CustomScript_Forecasting_Pipeline.ipynb) to make forecasts with your models.
| github_jupyter |
# Amazon SageMaker Object Detection for Bird Species
1. [Introduction](#Introduction)
2. [Setup](#Setup)
3. [Data Preparation](#Data-Preparation)
1. [Download and unpack the dataset](#Download-and-unpack-the-dataset)
2. [Understand the dataset](#Understand-the-dataset)
3. [Generate RecordIO files](#Generate-RecordIO-files)
4. [Train the model](#Train-the-model)
5. [Host the model](#Host-the-model)
6. [Test the model](#Test-the-model)
7. [Clean up](#Clean-up)
8. [Improve the model](#Improve-the-model)
9. [Final cleanup](#Final-cleanup)
## Introduction
Object detection is the process of identifying and localizing objects in an image. A typical object detection solution takes an image as input and provides a bounding box on the image where an object of interest is found. It also identifies what type of object the box encapsulates. To create such a solution, we need to acquire and process a training dataset, and create and set up a training job so that the algorithm can learn from the dataset. Finally, we can host the trained model at an endpoint, to which we can supply images.
This notebook is an end-to-end example showing how the Amazon SageMaker Object Detection algorithm can be used with a publicly available dataset of bird images. We demonstrate how to train and to host an object detection model based on the [Caltech Birds (CUB 200 2011)](http://www.vision.caltech.edu/visipedia/CUB-200-2011.html) dataset. Amazon SageMaker's object detection algorithm uses the Single Shot multibox Detector ([SSD](https://arxiv.org/abs/1512.02325)) algorithm, and this notebook uses a [ResNet](https://arxiv.org/pdf/1603.05027.pdf) base network with that algorithm.

We will also demonstrate how to construct a training dataset using the RecordIO format, as this is the format that the training job consumes. This notebook is similar to the [Object Detection using the RecordIO format](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/object_detection_pascalvoc_coco/object_detection_recordio_format.ipynb) notebook, with the following key differences:
- We provide an example of how to translate bounding box specifications when providing images to SageMaker's algorithm. You will see code for generating the train.lst and val.lst files used to create [recordIO](https://mxnet.incubator.apache.org/architecture/note_data_loading.html) files.
- We demonstrate how to improve an object detection model by adding training images that are flipped horizontally (mirror images).
- We give you a notebook for experimenting with object detection challenges with an order of magnitude more classes (200 bird species, as opposed to the 20 categories used by [Pascal VOC](http://host.robots.ox.ac.uk/pascal/VOC/)).
- We show how to chart the accuracy improvements that occur across the epochs of the training job.
Note that Amazon SageMaker Object Detection also allows training with the image and JSON format, which is illustrated in the [image and JSON Notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/object_detection_pascalvoc_coco/object_detection_image_json_format.ipynb).
## Setup
Before preparing the data, there are some initial steps required for setup.
This notebook requires two additional Python packages:
* **OpenCV** is required for gathering image sizes and flipping of images horizontally.
* The **MXNet** runtime is required for using the im2rec tool.
```
import sys
!{sys.executable} -m pip install opencv-python
!{sys.executable} -m pip install mxnet
```
We need to identify the S3 bucket that you want to use for providing training and validation datasets. It will also be used to store the trained model artifacts. In this notebook, we use a custom bucket. You could alternatively use a default bucket for the session. We use an object prefix to help organize the bucket content.
```
bucket = "<your_s3_bucket_name_here>" # custom bucket name.
prefix = "DEMO-ObjectDetection-birds"
```
To train the Object Detection algorithm on Amazon SageMaker, we need to set up and authenticate the use of AWS services. To begin with, we need an AWS account role with SageMaker access. Here we will use the execution role the current notebook instance was given when it was created. This role has the necessary permissions, including access to your data in S3.
```
import sagemaker
from sagemaker import get_execution_role
role = get_execution_role()
print(role)
sess = sagemaker.Session()
```
# Data Preparation
The [Caltech Birds (CUB 200 2011)](http://www.vision.caltech.edu/visipedia/CUB-200-2011.html) dataset contains 11,788 images across 200 bird species (the original technical report can be found [here](http://www.vision.caltech.edu/visipedia/papers/CUB_200_2011.pdf)). Each species comes with around 60 images, with a typical size of about 350 pixels by 500 pixels. Bounding boxes are provided, as are annotations of bird parts. A recommended train/test split is given, but image size data is not.

The dataset can be downloaded [here](http://www.vision.caltech.edu/visipedia/CUB-200-2011.html).
## Download and unpack the dataset
Here we download the birds dataset from CalTech.
```
import os
import urllib.request
def download(url):
filename = url.split("/")[-1]
if not os.path.exists(filename):
urllib.request.urlretrieve(url, filename)
%%time
# download('http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz')
# CalTech's download has been (at least temporarily) unavailable since August 2020.
# We can now use a copy made available by fast.ai instead.
download("https://s3.amazonaws.com/fast-ai-imageclas/CUB_200_2011.tgz")
```
Now we unpack the dataset into its own directory structure.
```
%%time
# Clean up prior version of the downloaded dataset if you are running this again
!rm -rf CUB_200_2011
# Unpack and then remove the downloaded compressed tar file
!gunzip -c ./CUB_200_2011.tgz | tar xopf -
!rm CUB_200_2011.tgz
```
# Understand the dataset
## Set some parameters for the rest of the notebook to use
Here we define a few parameters that help drive the rest of the notebook. For example, `SAMPLE_ONLY` is defaulted to `True`. This will force the notebook to train on only a handful of species. Setting it to `False` will make the notebook work with the entire dataset of 200 bird species. This makes training a more difficult challenge, and you will need many more epochs to complete it.
The file parameters define names and locations of metadata files for the dataset.
```
import pandas as pd
import cv2
import boto3
import json
runtime = boto3.client(service_name="runtime.sagemaker")
import matplotlib.pyplot as plt
%matplotlib inline
RANDOM_SPLIT = False
SAMPLE_ONLY = True
FLIP = False
# To speed up training and experimenting, you can use a small handful of species.
# To see the full list of the classes available, look at the content of CLASSES_FILE.
CLASSES = [17, 36, 47, 68, 73]
# Otherwise, you can use the full set of species
if not SAMPLE_ONLY:
CLASSES = []
for c in range(200):
CLASSES += [c + 1]
RESIZE_SIZE = 256
BASE_DIR = "CUB_200_2011/"
IMAGES_DIR = BASE_DIR + "images/"
CLASSES_FILE = BASE_DIR + "classes.txt"
BBOX_FILE = BASE_DIR + "bounding_boxes.txt"
IMAGE_FILE = BASE_DIR + "images.txt"
LABEL_FILE = BASE_DIR + "image_class_labels.txt"
SIZE_FILE = BASE_DIR + "sizes.txt"
SPLIT_FILE = BASE_DIR + "train_test_split.txt"
TRAIN_LST_FILE = "birds_ssd_train.lst"
VAL_LST_FILE = "birds_ssd_val.lst"
if SAMPLE_ONLY:
TRAIN_LST_FILE = "birds_ssd_sample_train.lst"
VAL_LST_FILE = "birds_ssd_sample_val.lst"
TRAIN_RATIO = 0.8
CLASS_COLS = ["class_number", "class_id"]
IM2REC_SSD_COLS = [
"header_cols",
"label_width",
"zero_based_id",
"xmin",
"ymin",
"xmax",
"ymax",
"image_file_name",
]
```
## Explore the dataset images
For each species, there are dozens of images of various shapes and sizes. By dividing the entire dataset into individual named (numbered) folders, the images are in effect labelled for supervised learning using image classification and object detection algorithms.
The following function displays a grid of thumbnail images for all the image files for a given species.
```
def show_species(species_id):
_im_list = !ls $IMAGES_DIR/$species_id
NUM_COLS = 6
IM_COUNT = len(_im_list)
print('Species ' + species_id + ' has ' + str(IM_COUNT) + ' images.')
NUM_ROWS = int(IM_COUNT / NUM_COLS)
if ((IM_COUNT % NUM_COLS) > 0):
NUM_ROWS += 1
fig, axarr = plt.subplots(NUM_ROWS, NUM_COLS)
fig.set_size_inches(8.0, 16.0, forward=True)
curr_row = 0
for curr_img in range(IM_COUNT):
# fetch the url as a file type object, then read the image
f = IMAGES_DIR + species_id + '/' + _im_list[curr_img]
a = plt.imread(f)
# find the grid position by taking the current index modulo the number of rows
col = curr_img % NUM_ROWS
# plot on relevant subplot
axarr[col, curr_row].imshow(a)
if col == (NUM_ROWS - 1):
# we have finished the current row, so increment row counter
curr_row += 1
fig.tight_layout()
plt.show()
# Clean up
plt.clf()
plt.cla()
plt.close()
```
Show the list of bird species or dataset classes.
```
classes_df = pd.read_csv(CLASSES_FILE, sep=" ", names=CLASS_COLS, header=None)
criteria = classes_df["class_number"].isin(CLASSES)
classes_df = classes_df[criteria]
print(classes_df.to_csv(columns=["class_id"], sep="\t", index=False, header=False))
```
Now for any given species, display thumbnail images of each of the images provided for training and testing.
```
show_species("017.Cardinal")
```
# Generate RecordIO files
## Step 1. Gather image sizes
For this particular dataset, bounding box annotations are specified in absolute terms. RecordIO format requires them to be defined in terms relative to the image size. The following code visits each image, extracts the height and width, and saves this information into a file for subsequent use. Some other publicly available datasets provide such a file for exactly this purpose.
```
%%time
SIZE_COLS = ["idx", "width", "height"]
def gen_image_size_file():
print("Generating a file containing image sizes...")
images_df = pd.read_csv(
IMAGE_FILE, sep=" ", names=["image_pretty_name", "image_file_name"], header=None
)
rows_list = []
idx = 0
for i in images_df["image_file_name"]:
# TODO: add progress bar
idx += 1
img = cv2.imread(IMAGES_DIR + i)
dimensions = img.shape
height = img.shape[0]
width = img.shape[1]
image_dict = {"idx": idx, "width": width, "height": height}
rows_list.append(image_dict)
sizes_df = pd.DataFrame(rows_list)
print("Image sizes:\n" + str(sizes_df.head()))
sizes_df[SIZE_COLS].to_csv(SIZE_FILE, sep=" ", index=False, header=None)
gen_image_size_file()
```
## Step 2. Generate list files for producing RecordIO files
[RecordIO](https://mxnet.incubator.apache.org/architecture/note_data_loading.html) files can be created using the [im2rec tool](https://mxnet.incubator.apache.org/faq/recordio.html) (images to RecordIO), which takes as input a pair of list files, one for training images and the other for validation images. Each list file has one row for each image. For object detection, each row must contain bounding box data and a class label.
For the CalTech birds dataset, we need to convert absolute bounding box dimensions to relative dimensions based on image size. We also need to adjust class IDs to be zero-based (instead of 1 to 200, they need to be 0 to 199). This dataset comes with recommended train/test split information (the "is_training_image" flag). This notebook is built flexibly to either leverage this suggestion, or to create a random train/test split with a specific train/test ratio. The `RANDOM_SPLIT` variable defined earlier controls whether or not the split happens randomly.
```
def split_to_train_test(df, label_column, train_frac=0.8):
train_df, test_df = pd.DataFrame(), pd.DataFrame()
labels = df[label_column].unique()
for lbl in labels:
lbl_df = df[df[label_column] == lbl]
lbl_train_df = lbl_df.sample(frac=train_frac)
lbl_test_df = lbl_df.drop(lbl_train_df.index)
print(
"\n{}:\n---------\ntotal:{}\ntrain_df:{}\ntest_df:{}".format(
lbl, len(lbl_df), len(lbl_train_df), len(lbl_test_df)
)
)
train_df = train_df.append(lbl_train_df)
test_df = test_df.append(lbl_test_df)
return train_df, test_df
def gen_list_files():
# use generated sizes file
sizes_df = pd.read_csv(
SIZE_FILE, sep=" ", names=["image_pretty_name", "width", "height"], header=None
)
bboxes_df = pd.read_csv(
BBOX_FILE,
sep=" ",
names=["image_pretty_name", "x_abs", "y_abs", "bbox_width", "bbox_height"],
header=None,
)
split_df = pd.read_csv(
SPLIT_FILE, sep=" ", names=["image_pretty_name", "is_training_image"], header=None
)
print(IMAGE_FILE)
images_df = pd.read_csv(
IMAGE_FILE, sep=" ", names=["image_pretty_name", "image_file_name"], header=None
)
print("num images total: " + str(images_df.shape[0]))
image_class_labels_df = pd.read_csv(
LABEL_FILE, sep=" ", names=["image_pretty_name", "class_id"], header=None
)
# Merge the metadata into a single flat dataframe for easier processing
full_df = pd.DataFrame(images_df)
full_df.reset_index(inplace=True)
full_df = pd.merge(full_df, image_class_labels_df, on="image_pretty_name")
full_df = pd.merge(full_df, sizes_df, on="image_pretty_name")
full_df = pd.merge(full_df, bboxes_df, on="image_pretty_name")
full_df = pd.merge(full_df, split_df, on="image_pretty_name")
full_df.sort_values(by=["index"], inplace=True)
# Define the bounding boxes in the format required by SageMaker's built in Object Detection algorithm.
# the xmin/ymin/xmax/ymax parameters are specified as ratios to the total image pixel size
full_df["header_cols"] = 2 # one col for the number of header cols, one for the label width
full_df["label_width"] = 5 # number of cols for each label: class, xmin, ymin, xmax, ymax
full_df["xmin"] = full_df["x_abs"] / full_df["width"]
full_df["xmax"] = (full_df["x_abs"] + full_df["bbox_width"]) / full_df["width"]
full_df["ymin"] = full_df["y_abs"] / full_df["height"]
full_df["ymax"] = (full_df["y_abs"] + full_df["bbox_height"]) / full_df["height"]
# object detection class id's must be zero based. map from
# class_id's given by CUB to zero-based (1 is 0, and 200 is 199).
if SAMPLE_ONLY:
# grab a small subset of species for testing
criteria = full_df["class_id"].isin(CLASSES)
full_df = full_df[criteria]
unique_classes = full_df["class_id"].drop_duplicates()
sorted_unique_classes = sorted(unique_classes)
id_to_zero = {}
i = 0.0
for c in sorted_unique_classes:
id_to_zero[c] = i
i += 1.0
full_df["zero_based_id"] = full_df["class_id"].map(id_to_zero)
full_df.reset_index(inplace=True)
# use 4 decimal places, as it seems to be required by the Object Detection algorithm
pd.set_option("display.precision", 4)
train_df = []
val_df = []
if RANDOM_SPLIT:
# split into training and validation sets
train_df, val_df = split_to_train_test(full_df, "class_id", TRAIN_RATIO)
train_df[IM2REC_SSD_COLS].to_csv(TRAIN_LST_FILE, sep="\t", float_format="%.4f", header=None)
val_df[IM2REC_SSD_COLS].to_csv(VAL_LST_FILE, sep="\t", float_format="%.4f", header=None)
else:
train_df = full_df[(full_df.is_training_image == 1)]
train_df[IM2REC_SSD_COLS].to_csv(TRAIN_LST_FILE, sep="\t", float_format="%.4f", header=None)
val_df = full_df[(full_df.is_training_image == 0)]
val_df[IM2REC_SSD_COLS].to_csv(VAL_LST_FILE, sep="\t", float_format="%.4f", header=None)
print("num train: " + str(train_df.shape[0]))
print("num val: " + str(val_df.shape[0]))
return train_df, val_df
train_df, val_df = gen_list_files()
```
Here we take a look at a few records from the training list file to understand better what is being fed to the RecordIO files.
The first column is the image number or index. The second column indicates that the header is made up of 2 columns (columns 2 and 3: this count and the label width). The third column specifies the label width for a single object. In our case, the value 5 indicates each object is described by 5 numbers: the class index and the 4 bounding box coordinates. If there are multiple objects within one image, all the label information should be listed in one line. Our dataset contains only one bounding box per image.
The fourth column is the class label. This identifies the bird species using a zero-based class ID. Columns 5 through 8 represent the bounding box for where the bird is found in this image.
The classes should be labeled with successive numbers and start with 0. The bounding box coordinates are ratios of its top-left (xmin, ymin) and bottom-right (xmax, ymax) corner indices to the overall image size. Note that the top-left corner of the entire image is the origin (0, 0). The last column specifies the relative path of the image file within the images directory.
```
!tail -3 $TRAIN_LST_FILE
```
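If it is easier to inspect the list file as a dataframe, a minimal sketch is shown below. It assumes the leading integer index column that `to_csv()` writes by default; the column names here are only for inspection and are not part of the im2rec format itself.
```
# Optional: read the training list file back into pandas for inspection.
# Assumes the default index column written by to_csv() above.
lst_df = pd.read_csv(TRAIN_LST_FILE, sep='\t', header=None,
                     names=['index'] + IM2REC_SSD_COLS)
lst_df.head()
```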
## Step 3. Convert data into RecordIO format
Now we create im2rec databases (.rec files) for training and validation based on the list files created earlier.
```
!python tools/im2rec.py --resize $RESIZE_SIZE --pack-label birds_ssd_sample $BASE_DIR/images/
```
## Step 4. Upload RecordIO files to S3
Upload the training and validation data to the S3 bucket. We do this in multiple channels. Channels are simply directories in the bucket that differentiate the types of data provided to the algorithm. For the object detection algorithm, we call these directories `train` and `validation`.
```
# Upload the RecordIO files to train and validation channels
train_channel = prefix + "/train"
validation_channel = prefix + "/validation"
sess.upload_data(path="birds_ssd_sample_train.rec", bucket=bucket, key_prefix=train_channel)
sess.upload_data(path="birds_ssd_sample_val.rec", bucket=bucket, key_prefix=validation_channel)
s3_train_data = "s3://{}/{}".format(bucket, train_channel)
s3_validation_data = "s3://{}/{}".format(bucket, validation_channel)
```
# Train the model
Next we define an output location in S3, where the model artifacts will be placed on completion of the training. These artifacts are the output of the algorithm's training job. We also get the URI to the Amazon SageMaker Object Detection docker image. This ensures the estimator uses the correct algorithm from the current region.
```
from sagemaker.amazon.amazon_estimator import get_image_uri
training_image = get_image_uri(sess.boto_region_name, "object-detection", repo_version="latest")
print(training_image)
s3_output_location = "s3://{}/{}/output".format(bucket, prefix)
od_model = sagemaker.estimator.Estimator(
training_image,
role,
train_instance_count=1,
train_instance_type="ml.p3.2xlarge",
train_volume_size=50,
train_max_run=360000,
input_mode="File",
output_path=s3_output_location,
sagemaker_session=sess,
)
```
## Define hyperparameters
The object detection algorithm at its core is the [Single-Shot Multi-Box detection algorithm (SSD)](https://arxiv.org/abs/1512.02325). This algorithm uses a `base_network`, which is typically a [VGG](https://arxiv.org/abs/1409.1556) or a [ResNet](https://arxiv.org/abs/1512.03385). The Amazon SageMaker object detection algorithm supports VGG-16 and ResNet-50. It also has a number of hyperparameters that help configure the training job. The next step in our training is to set up these hyperparameters and data channels for training the model. See the SageMaker Object Detection [documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/object-detection.html) for more details on its specific hyperparameters.
One of the hyperparameters here for example is `epochs`. This defines how many passes of the dataset we iterate over and drives the training time of the algorithm. Based on our tests, we can achieve 70% accuracy on a sample mix of 5 species with 100 epochs. When using the full 200 species, we can achieve 52% accuracy with 1,200 epochs.
Note that Amazon SageMaker also provides [Automatic Model Tuning](https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning.html). Automatic model tuning, also known as hyperparameter tuning, finds the best version of a model by running many training jobs on your dataset using the algorithm and ranges of hyperparameters that you specify. It then chooses the hyperparameter values that result in a model that performs the best, as measured by a metric that you choose. When [tuning an Object Detection](https://docs.aws.amazon.com/sagemaker/latest/dg/object-detection-tuning.html) algorithm for example, the tuning job could find the best `validation:mAP` score by trying out various values for certain hyperparameters such as `mini_batch_size`, `weight_decay`, and `momentum`.
```
def set_hyperparameters(num_epochs, lr_steps):
num_classes = classes_df.shape[0]
num_training_samples = train_df.shape[0]
print("num classes: {}, num training images: {}".format(num_classes, num_training_samples))
od_model.set_hyperparameters(
base_network="resnet-50",
use_pretrained_model=1,
num_classes=num_classes,
mini_batch_size=16,
epochs=num_epochs,
learning_rate=0.001,
lr_scheduler_step=lr_steps,
lr_scheduler_factor=0.1,
optimizer="sgd",
momentum=0.9,
weight_decay=0.0005,
overlap_threshold=0.5,
nms_threshold=0.45,
image_shape=512,
label_width=350,
num_training_samples=num_training_samples,
)
set_hyperparameters(100, "33,67")
```
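As an aside on the Automatic Model Tuning mentioned above, the sketch below shows how a tuning job could be wired to the estimator we defined. It is hypothetical and not run in this notebook, and the hyperparameter ranges and job counts are illustrative assumptions rather than recommended values.
```
# Hypothetical sketch of Automatic Model Tuning for the object detection estimator (not run here)
from sagemaker.tuner import HyperparameterTuner, ContinuousParameter, IntegerParameter

hyperparameter_ranges = {
    'learning_rate': ContinuousParameter(0.0001, 0.01),
    'mini_batch_size': IntegerParameter(8, 32),
    'momentum': ContinuousParameter(0.8, 0.99),
}

tuner = HyperparameterTuner(
    estimator=od_model,
    objective_metric_name='validation:mAP',
    objective_type='Maximize',
    hyperparameter_ranges=hyperparameter_ranges,
    max_jobs=6,
    max_parallel_jobs=2,
)
# tuner.fit(inputs=data_channels)  # data_channels is defined in the next cell
```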
Now that the hyperparameters are set up, we define the data channels to be passed to the algorithm. To do this, we need to create `sagemaker.session.s3_input` objects from our data channels. These objects are then put in a simple dictionary, which the algorithm consumes. Note that you could add a third channel named `model` to perform incremental training (continue training from where you had left off with a prior model).
```
train_data = sagemaker.session.s3_input(
s3_train_data,
distribution="FullyReplicated",
content_type="application/x-recordio",
s3_data_type="S3Prefix",
)
validation_data = sagemaker.session.s3_input(
s3_validation_data,
distribution="FullyReplicated",
content_type="application/x-recordio",
s3_data_type="S3Prefix",
)
data_channels = {"train": train_data, "validation": validation_data}
```
## Submit training job
We have our `Estimator` object, we have set the hyperparameters for this object, and we have our data channels linked with the algorithm. The only remaining thing to do is to train the algorithm using the `fit` method. This will take more than 10 minutes in our example.
The training process involves a few steps. First, the instances that we requested while creating the `Estimator` are provisioned and set up with the appropriate libraries. Then, the data from our channels is downloaded into the instance. Once this is done, the actual training begins. The provisioning and data downloading will take time, depending on the size of the data. Therefore it might be a few minutes before our training job logs show up in CloudWatch. The logs will also print out the Mean Average Precision (mAP) on the validation data, among other losses, for every run of the dataset (once per epoch). This metric is a proxy for the accuracy of the model.
Once the job has finished, a `Job complete` message will be printed. The trained model artifacts can be found in the S3 bucket that was set up as `output_path` in the estimator.
```
%%time
od_model.fit(inputs=data_channels, logs=True)
```
Now that the training job is complete, you can also see the job listed in the `Training jobs` section of your SageMaker console. Note that the job name is uniquely identified by the name of the algorithm concatenated with the date and time stamp. You can click on the job to see the details including the hyperparameters, the data channel definitions, and the full path to the resulting model artifacts. You could even clone the job from the console, and tweak some of the parameters to generate a new training job.
Without having to go to the CloudWatch console, you can see how the job progressed in terms of the key object detection algorithm metric, mean average precision (mAP). The function below prepares a simple chart of that metric against the epochs.
```
import boto3
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
%matplotlib inline
client = boto3.client("logs")
BASE_LOG_NAME = "/aws/sagemaker/TrainingJobs"
def plot_object_detection_log(model, title):
logs = client.describe_log_streams(
logGroupName=BASE_LOG_NAME, logStreamNamePrefix=model._current_job_name
)
cw_log = client.get_log_events(
logGroupName=BASE_LOG_NAME, logStreamName=logs["logStreams"][0]["logStreamName"]
)
mAP_accs = []
for e in cw_log["events"]:
msg = e["message"]
if "validation mAP <score>=" in msg:
num_start = msg.find("(")
num_end = msg.find(")")
mAP = msg[num_start + 1 : num_end]
mAP_accs.append(float(mAP))
print(title)
print("Maximum mAP: %f " % max(mAP_accs))
fig, ax = plt.subplots()
plt.xlabel("Epochs")
plt.ylabel("Mean Avg Precision (mAP)")
(val_plot,) = ax.plot(range(len(mAP_accs)), mAP_accs, label="mAP")
plt.legend(handles=[val_plot])
ax.yaxis.set_ticks(np.arange(0.0, 1.05, 0.1))
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter("%0.2f"))
plt.show()
plot_object_detection_log(od_model, "mAP tracking for job: " + od_model._current_job_name)
```
# Host the model
Once the training is done, we can deploy the trained model as an Amazon SageMaker real-time hosted endpoint. This lets us make predictions (or inferences) from the model. Note that we don't have to host on the same type of instance that we used to train. Training is a prolonged and compute-heavy job with compute and memory requirements that hosting typically does not have. In our case we chose the `ml.p3.2xlarge` instance to train, but we choose to host the model on a less expensive CPU instance, `ml.m4.xlarge`. The endpoint deployment takes several minutes, and can be accomplished with a single line of code calling the `deploy` method.
Note that some use cases require large sets of inferences on a predefined body of images. In those cases, you do not need to make the inferences in real time. Instead, you could use SageMaker's [batch transform jobs](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-batch.html).
```
%%time
object_detector = od_model.deploy(initial_instance_count=1, instance_type="ml.m4.xlarge")
```
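For the batch transform scenario mentioned above, a minimal hypothetical sketch might look like the following; the S3 input and output paths are placeholders, and the cell is not run in this notebook.
```
# Hypothetical sketch of offline (batch) inference instead of a real-time endpoint
transformer = od_model.transformer(
    instance_count=1,
    instance_type='ml.m4.xlarge',
    output_path='s3://{}/{}/batch-output'.format(bucket, prefix),
)
# transformer.transform('s3://{}/{}/batch-input'.format(bucket, prefix),
#                       content_type='image/jpeg')
# transformer.wait()
```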
# Test the model
Now that the trained model is deployed at an endpoint that is up-and-running, we can use this endpoint for inference. The results of a call to the inference endpoint are in a format that is similar to the .lst format, with the addition of a confidence score for each detected object. The format of the output can be represented as `[class_index, confidence_score, xmin, ymin, xmax, ymax]`. Typically, we don't visualize low-confidence predictions.
We have provided a script to easily visualize the detection outputs. You can visualize the high-confidence predictions with bounding boxes by filtering out low-confidence detections using the script below:
```
def visualize_detection(img_file, dets, classes=[], thresh=0.6):
"""
visualize detections in one image
Parameters:
----------
    img_file : str
        path to the image file
dets : numpy.array
ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...])
each row is one object
classes : tuple or list of str
class names
thresh : float
score threshold
"""
import random
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img = mpimg.imread(img_file)
plt.imshow(img)
height = img.shape[0]
width = img.shape[1]
colors = dict()
num_detections = 0
for det in dets:
(klass, score, x0, y0, x1, y1) = det
if score < thresh:
continue
num_detections += 1
cls_id = int(klass)
if cls_id not in colors:
colors[cls_id] = (random.random(), random.random(), random.random())
xmin = int(x0 * width)
ymin = int(y0 * height)
xmax = int(x1 * width)
ymax = int(y1 * height)
rect = plt.Rectangle(
(xmin, ymin),
xmax - xmin,
ymax - ymin,
fill=False,
edgecolor=colors[cls_id],
linewidth=3.5,
)
plt.gca().add_patch(rect)
class_name = str(cls_id)
if classes and len(classes) > cls_id:
class_name = classes[cls_id]
print("{},{}".format(class_name, score))
plt.gca().text(
xmin,
ymin - 2,
"{:s} {:.3f}".format(class_name, score),
bbox=dict(facecolor=colors[cls_id], alpha=0.5),
fontsize=12,
color="white",
)
print("Number of detections: " + str(num_detections))
plt.show()
```
Now we use our endpoint to try to detect objects within an image. Since the image is a JPEG, we use the appropriate content_type to run the prediction. The endpoint returns a JSON object that we can simply load and peek into. We have packaged the prediction code into a function to make it easier to test other images. Note that we are defaulting the confidence threshold to 40% in our example, as a couple of the birds in our sample images were not being detected as clearly. Defining an appropriate threshold is entirely dependent on your use case.
```
OBJECT_CATEGORIES = classes_df["class_id"].values.tolist()
def show_bird_prediction(filename, ep, thresh=0.40):
b = ""
with open(filename, "rb") as image:
f = image.read()
b = bytearray(f)
endpoint_response = runtime.invoke_endpoint(EndpointName=ep, ContentType="image/jpeg", Body=b)
results = endpoint_response["Body"].read()
detections = json.loads(results)
visualize_detection(filename, detections["prediction"], OBJECT_CATEGORIES, thresh)
```
Here we download images that the algorithm has not yet seen.
```
!wget -q -O multi-goldfinch-1.jpg https://t3.ftcdn.net/jpg/01/44/64/36/500_F_144643697_GJRUBtGc55KYSMpyg1Kucb9yJzvMQooW.jpg
!wget -q -O northern-flicker-1.jpg https://upload.wikimedia.org/wikipedia/commons/5/5c/Northern_Flicker_%28Red-shafted%29.jpg
!wget -q -O northern-cardinal-1.jpg https://cdn.pixabay.com/photo/2013/03/19/04/42/bird-94957_960_720.jpg
!wget -q -O blue-jay-1.jpg https://cdn12.picryl.com/photo/2016/12/31/blue-jay-bird-feather-animals-b8ee04-1024.jpg
!wget -q -O hummingbird-1.jpg http://res.freestockphotos.biz/pictures/17/17875-hummingbird-close-up-pv.jpg
def test_model():
show_bird_prediction("hummingbird-1.jpg", object_detector.endpoint)
show_bird_prediction("blue-jay-1.jpg", object_detector.endpoint)
show_bird_prediction("multi-goldfinch-1.jpg", object_detector.endpoint)
show_bird_prediction("northern-flicker-1.jpg", object_detector.endpoint)
show_bird_prediction("northern-cardinal-1.jpg", object_detector.endpoint)
test_model()
```
# Clean up
Here we delete the SageMaker endpoint, as we will no longer be performing any inferences. This is an important step, as your account is billed for the amount of time an endpoint is running, even when it is idle.
```
sagemaker.Session().delete_endpoint(object_detector.endpoint)
```
# Improve the model
## Define Function to Flip the Images Horizontally (Mirror Images)
```
from PIL import Image
def flip_images():
print("Flipping images...")
SIZE_COLS = ["idx", "width", "height"]
IMAGE_COLS = ["image_pretty_name", "image_file_name"]
LABEL_COLS = ["image_pretty_name", "class_id"]
BBOX_COLS = ["image_pretty_name", "x_abs", "y_abs", "bbox_width", "bbox_height"]
SPLIT_COLS = ["image_pretty_name", "is_training_image"]
images_df = pd.read_csv(BASE_DIR + "images.txt", sep=" ", names=IMAGE_COLS, header=None)
image_class_labels_df = pd.read_csv(
BASE_DIR + "image_class_labels.txt", sep=" ", names=LABEL_COLS, header=None
)
bboxes_df = pd.read_csv(BASE_DIR + "bounding_boxes.txt", sep=" ", names=BBOX_COLS, header=None)
split_df = pd.read_csv(
BASE_DIR + "train_test_split.txt", sep=" ", names=SPLIT_COLS, header=None
)
NUM_ORIGINAL_IMAGES = images_df.shape[0]
rows_list = []
bbox_rows_list = []
size_rows_list = []
label_rows_list = []
split_rows_list = []
idx = 0
full_df = images_df.copy()
full_df.reset_index(inplace=True)
full_df = pd.merge(full_df, image_class_labels_df, on="image_pretty_name")
full_df = pd.merge(full_df, bboxes_df, on="image_pretty_name")
full_df = pd.merge(full_df, split_df, on="image_pretty_name")
full_df.sort_values(by=["index"], inplace=True)
if SAMPLE_ONLY:
# grab a small subset of species for testing
criteria = full_df["class_id"].isin(CLASSES)
full_df = full_df[criteria]
for rel_image_fn in full_df["image_file_name"]:
idx += 1
full_img_content = full_df[(full_df.image_file_name == rel_image_fn)]
class_id = full_img_content.iloc[0].class_id
img = Image.open(IMAGES_DIR + rel_image_fn)
width, height = img.size
new_idx = idx + NUM_ORIGINAL_IMAGES
flip_core_file_name = rel_image_fn[:-4] + "_flip.jpg"
flip_full_file_name = IMAGES_DIR + flip_core_file_name
img_flip = img.transpose(Image.FLIP_LEFT_RIGHT)
img_flip.save(flip_full_file_name)
# append a new image
dict = {"image_pretty_name": new_idx, "image_file_name": flip_core_file_name}
rows_list.append(dict)
# append a new split, use same flag for flipped image from original image
is_training_image = full_img_content.iloc[0].is_training_image
split_dict = {"image_pretty_name": new_idx, "is_training_image": is_training_image}
split_rows_list.append(split_dict)
# append a new image class label
label_dict = {"image_pretty_name": new_idx, "class_id": class_id}
label_rows_list.append(label_dict)
# add a size row for the original and the flipped image, same height and width
size_dict = {"idx": idx, "width": width, "height": height}
size_rows_list.append(size_dict)
size_dict = {"idx": new_idx, "width": width, "height": height}
size_rows_list.append(size_dict)
# append bounding box for flipped image
x_abs = full_img_content.iloc[0].x_abs
y_abs = full_img_content.iloc[0].y_abs
bbox_width = full_img_content.iloc[0].bbox_width
bbox_height = full_img_content.iloc[0].bbox_height
flipped_x_abs = width - bbox_width - x_abs
bbox_dict = {
"image_pretty_name": new_idx,
"x_abs": flipped_x_abs,
"y_abs": y_abs,
"bbox_width": bbox_width,
"bbox_height": bbox_height,
}
bbox_rows_list.append(bbox_dict)
print("Done looping through original images")
images_df = images_df.append(rows_list)
images_df[IMAGE_COLS].to_csv(IMAGE_FILE, sep=" ", index=False, header=None)
bboxes_df = bboxes_df.append(bbox_rows_list)
bboxes_df[BBOX_COLS].to_csv(BBOX_FILE, sep=" ", index=False, header=None)
split_df = split_df.append(split_rows_list)
split_df[SPLIT_COLS].to_csv(SPLIT_FILE, sep=" ", index=False, header=None)
sizes_df = pd.DataFrame(size_rows_list)
sizes_df[SIZE_COLS].to_csv(SIZE_FILE, sep=" ", index=False, header=None)
image_class_labels_df = image_class_labels_df.append(label_rows_list)
image_class_labels_df[LABEL_COLS].to_csv(LABEL_FILE, sep=" ", index=False, header=None)
print("Done saving metadata in text files")
```
## Re-train the model with the expanded dataset
```
%%time
BBOX_FILE = BASE_DIR + "bounding_boxes_with_flip.txt"
IMAGE_FILE = BASE_DIR + "images_with_flip.txt"
LABEL_FILE = BASE_DIR + "image_class_labels_with_flip.txt"
SIZE_FILE = BASE_DIR + "sizes_with_flip.txt"
SPLIT_FILE = BASE_DIR + "train_test_split_with_flip.txt"
# add a set of flipped images
flip_images()
# show the new full set of images for a species
show_species("017.Cardinal")
# create new sizes file
gen_image_size_file()
# re-create and re-deploy the RecordIO files with the updated set of images
train_df, val_df = gen_list_files()
!python tools/im2rec.py --resize $RESIZE_SIZE --pack-label birds_ssd_sample $BASE_DIR/images/
sess.upload_data(path="birds_ssd_sample_train.rec", bucket=bucket, key_prefix=train_channel)
sess.upload_data(path="birds_ssd_sample_val.rec", bucket=bucket, key_prefix=validation_channel)
# account for the new number of training images
set_hyperparameters(100, "33,67")
# re-train
od_model.fit(inputs=data_channels, logs=True)
# check out the new accuracy
plot_object_detection_log(od_model, "mAP tracking for job: " + od_model._current_job_name)
```
## Re-deploy and test
```
# host the updated model
object_detector = od_model.deploy(initial_instance_count=1, instance_type="ml.m4.xlarge")
# test the new model
test_model()
```
# Final cleanup
Here we delete the SageMaker endpoint, as we will no longer be performing any inferences. This is an important step, as your account is billed for the amount of time an endpoint is running, even when it is idle.
```
# delete the new endpoint
sagemaker.Session().delete_endpoint(object_detector.endpoint)
```
| github_jupyter |
# Test
```
import fastai.train
import pandas as pd
import torch
import torch.nn as nn
from captum.attr import LayerIntegratedGradients
# --- Model Setup ---
# Load a fast.ai `Learner` trained to predict IMDB review category `[negative, positive]`
awd = fastai.train.load_learner(".", "imdb_fastai_trained_lm_clf.pth")
awd.model[0].bptt = 200
# getting to the actual layer that holds embeddings
embedding_layer = awd.model[0]._modules["module"]._modules["encoder_dp"]
# working around the model prediction - first output only, apply softmax
forward_func = lambda x: torch.softmax(awd.model(x)[0], dim=-1)
# make integrated gradients instance
lig = LayerIntegratedGradients(forward_func, embedding_layer)
# Explainer logic
def get_attributions_for_sentence(
sentence,
awd_model=awd,
lig_instance=lig,
target=None,
lig_n_steps=200,
baseline_token="\n \n ",
):
awd = awd_model
lig = lig_instance
vocab = awd.data.x.vocab
sentence_tokens = awd.data.one_item(sentence)[0]
reversed_tokens = [vocab.itos[w] for w in sentence_tokens[0]]
baseline = (
torch.ones_like(sentence_tokens) * vocab.stoi[baseline_token]
) # see "how to choose a good baseline"
baseline[0, 0] = vocab.stoi["xxbos"] # beginning of sentence is always #1
y = awd.predict(sentence)
if target is None:
target = y[1].item()
attrs = lig.attribute(sentence_tokens, baseline, target, n_steps=lig_n_steps)
a = attrs.sum(-1)
a = a / torch.norm(a)
return (pd.Series(a.numpy()[0], index=reversed_tokens), y)
# https://www.imdb.com/review/rw5384922/?ref_=tt_urv
review_1917 = """I sat in a packed yet silent theater this morning and watched, what I believe to be, the next Academy Award winner for the Best Picture."""
"""I'm not at all a fan of war movies but I am a fan of great movies... and 1917 is a great movie. I have never been so mesmerized by set design and direction, the mass human emotion of this film is astonishingly captured and embedded magically in the audience. It keeps running through my mind...the poetry and beauty intertwined with the raw misery of war. Treat yourself... see this movie!
""";
import ipyvuetify as v
import ipywidgets as w
class Chip(v.Chip):
positive = "0, 255, 0"
negative = "255, 0, 0"
def __init__(self, word, attribution):
direction = self.positive if attribution >= 0 else self.negative
color = f"rgba({direction}, {abs(attribution):.2f})"
super().__init__(
class_="mx-0 px-1",
children=[word],
color=color,
value=attribution,
label=True,
small=True,
)
def saliency_chips(attributions: pd.Series) -> v.ChipGroup:
children = [Chip(w, a) for w, a in attributions.iteritems()]
return v.ChipGroup(column=True, children=children)
@w.interact_manual(
sentence=w.Textarea(review_1917),
target=[None, 0, 1],
baseline_token=["\n \n", ".", "<BOS>"],
)
def display_attributions(sentence="Great film", target=None, baseline_token="\n \n "):
attributions, prediction = get_attributions_for_sentence(sentence)
return saliency_chips(attributions)
```
| github_jupyter |
# Repertoire classification subsampling
When training a classifier to assign repertoires to the subject from which they were obtained, we need a set of subsampled sequences. The sequences have been condensed to just the V- and J-gene assignments and the CDR3 length (VJ-CDR3len). Subsample sizes range from 10 to 10,000 sequences per biological replicate.
The [`abutils`](https://www.github.com/briney/abutils) Python package is required for this notebook, and can be installed by running `pip install abutils`.
*NOTE: this notebook requires the use of the Unix command line tool `shuf`. Thus, it requires a Unix-based operating system to run correctly (MacOS and most flavors of Linux should be fine). Running this notebook on Windows 10 may be possible using the [Windows Subsystem for Linux](https://docs.microsoft.com/en-us/windows/wsl/about) but we have not tested this.*
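Before running anything, a small sanity check for the `shuf` dependency can save a confusing failure later. This is a minimal sketch assuming a standard coreutils installation on a Unix-like system.
```
# Quick check that the `shuf` utility is available on this system
import shutil

if shutil.which('shuf') is None:
    raise RuntimeError("The 'shuf' command was not found; this notebook requires a Unix-like OS with GNU coreutils.")
```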
```
from __future__ import print_function, division
from collections import Counter
import os
import subprocess as sp
import sys
import tempfile
from abutils.utils.pipeline import list_files, make_dir
```
## Subjects, subsample sizes, and directories
The `input_dir` should contain deduplicated clonotype sequences. The datafiles are too large to be included in the Github repository, but may be downloaded [**here**](http://burtonlab.s3.amazonaws.com/GRP_github_data/techrep-merged_vj-cdr3len_no-header.tar.gz). If downloading the data (which will be downloaded as a compressed archive), decompress the archive in the `data` directory (in the same parent directory as this notebook) and you should be ready to go. If you want to store the downloaded data in some other location, adjust the `input_dir` path below as needed.
By default, subsample sizes increase by 10 from 10 to 100, by 100 from 100 to 1,000, and by 1,000 from 1,000 to 10,000.
```
with open('./data/subjects.txt') as f:
subjects = sorted(f.read().split())
subsample_sizes = list(range(10, 100, 10)) + list(range(100, 1000, 100)) + list(range(1000, 11000, 1000))
input_dir = './data/techrep-merged_vj-cdr3len_no-header/'
subsample_dir = './data/repertoire_classification/user-created_subsamples_vj-cdr3len'
make_dir(subsample_dir)
```
## Subsampling
```
def subsample(infile, outfile, n_seqs, iterations):
    # start with an empty output file
    with open(outfile, 'w') as f:
        f.write('')
    shuf_cmd = 'shuf -n {} {}'.format(n_seqs, infile)
    with open(outfile, 'a') as f:
        for iteration in range(iterations):
            # draw a fresh random subsample for each iteration
            p = sp.Popen(shuf_cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True, universal_newlines=True)
            stdout, stderr = p.communicate()
            seqs = ['_'.join(s.strip().split()) for s in stdout.strip().split('\n') if s.strip()]
            counts = Counter(seqs)
            count_strings = []
            for k, v in counts.items():
                count_strings.append('{}:{}'.format(k, v))
            f.write(','.join(count_strings) + '\n')
for subject in subjects:
print(subject)
files = list_files(os.path.join(input_dir, subject))
for file_ in files:
for subsample_size in subsample_sizes:
num = os.path.basename(file_).split('_')[0]
ofile = os.path.join(subsample_dir, '{}_{}-{}'.format(subject, subsample_size, num))
subsample(file_, ofile, subsample_size, 50)
```
| github_jupyter |
# Strata objects: Legend and Column
Strata are stratigraphic data.
The main object of the `strata` submodule is `mplStrater.strata.Column`, which represents a single stratigraphic column.
This example shows the structure of the class and how to use it.
First, import all required packages and load the example dataset.
```
%load_ext autoreload
%autoreload 2
from mplStrater.data import StrataFrame
from mplStrater.strata import Column,Legend
import pandas as pd
import matplotlib.pyplot as plt
df=pd.read_csv("../../../data/example.csv")
df.head()
```
Then, instantiate a `StrataFrame`, providing a `pandas.DataFrame` and specifying its `epsg` code.
```
sf=StrataFrame(
df=df,
epsg=32633)
```
## Define a `Legend`.
This is done by providing dictionaries of value-specification pairs for the `fill_dict` and `hatch_dict` parameters.
Each dictionary maps the dataframe's `fill` and `hatch` column values to either a *matplotlib encoded color* or an *encoded hatch* string.
The example uses the following dictionaries.
```
fill_dict={
'Terreno conforme': 'lightgreen',
'Riporto conforme': 'darkgreen',
'Riporto non conforme': 'orange',
'Rifiuto': 'red',
'Assenza campione': 'white'
}
hatch_dict={
'Non pericoloso': '',
'Pericoloso': 'xxxxxxxxx',
'_': ''
}
l=Legend(
fill_dict=fill_dict,
hatch_dict=hatch_dict
)
```
## Plot stand-alone `Column` objects
Imagine we need to inspect a column closely. It's not certain that we could do so clearly on the map with all the other elements (labels, basemap...). Exporting the map as a high-resolution PDF and opening the local file would take far too long! Therefore the `Column` object has its own `plot()` method.
Let's plot the first three columns of the strataframe.
```
sf.strataframe[:3]
```
Plot the first three columns contained in the `StrataFrame`.
```
#create figure
f,axes=plt.subplots(1,4,figsize=(5,3),dpi=200,frameon=False)
for ax,i in zip(axes,range(4)):
ax.axis('off')
#instantiate class
c=Column(
#figure
ax,l,
#id
sf.strataframe.loc[i,"ID"],
#coords
(0.9,0.9),
#scale
sf.strataframe.loc[i,"scale"],
3,
#stratigraphic data
sf.strataframe.loc[i,"layers"],
sf.strataframe.loc[i,"fill_list"],
sf.strataframe.loc[i,"hatch_list"],
#labels
sf.strataframe.loc[i,"lbl1_list"],
sf.strataframe.loc[i,"lbl2_list"],
sf.strataframe.loc[i,"lbl3_list"])
ax.set_title(c.id)
c.fill_column()
c.set_inset_params()
c.label_column(hardcoding=None)
```
| github_jupyter |
Sometimes it is useful to take a random choice between two or more options.
Numpy has a function for that, called `random.choice`:
```
import numpy as np
```
Say we want to choose randomly between 0 and 1. We want an equal probability of getting 0 and getting 1. We could do it like this:
```
np.random.randint(0, 2)
```
If we do that lots of times, we see that we have a roughly 50% chance of getting 0 (and therefore, a roughly 50% chance of getting 1).
```
# Make 10000 random numbers that can be 0 or 1, with equal probability.
lots_of_0_1 = np.random.randint(0, 2, size=10000)
# Count the proportion that are 1.
np.count_nonzero(lots_of_0_1) / 10000
```
Run the cell above a few times to confirm you get numbers very close to 0.5.
Another way of doing this is to use `np.random.choice`.
As usual, check the arguments that the function expects with `np.random.choice?` in a notebook cell.
The first argument is a sequence, like a list, with the options that Numpy should choose from.
For example, we can ask Numpy to choose randomly from the list `[0, 1]`:
```
np.random.choice([0, 1])
```
A second `size` argument to the function says how many items to choose:
```
# Ten numbers, where each has a 50% chance of 0 and 50% chance of 1.
np.random.choice([0, 1], size=10)
```
By default, Numpy will choose each item in the sequence with equal probability. In this case, Numpy will choose 0 with 50% probability, and 1 with 50% probability:
```
# Use choice to make another 10000 random numbers that can be 0 or 1,
# with equal probability.
more_0_1 = np.random.choice([0, 1], size=10000)
# Count the proportion that are 1.
np.count_nonzero(more_0_1) / 10000
```
If you want, you can change these proportions with the `p` argument:
```
# Use choice to make another 10000 random numbers that can be 0 or 1,
# where 0 has probability 0.25, and 1 has probability 0.75.
weighted_0_1 = np.random.choice([0, 1], size=10000, p=[0.25, 0.75])
# Count the proportion that are 1.
np.count_nonzero(weighted_0_1) / 10000
```
There can be more than two choices:
```
# Use choice to make another 10000 random numbers that can be 0 or 10 or 20, or
# 30, where each has probability 0.25.
multi_nos = np.random.choice([0, 10, 20, 30], size=10000)
multi_nos[:10]
np.count_nonzero(multi_nos == 30) / 10000
```
The choices don't have to be numbers:
```
np.random.choice(['Heads', 'Tails'], size=10)
```
You can also do choices *without replacement*, so once you have chosen an element, all subsequent choices cannot choose that element again. For example, this *must* return all the elements from the choices, but in random order:
```
np.random.choice([0, 10, 20, 30], size=4, replace=False)
```
| github_jupyter |
```
package_jar = '../target/spark-data-repair-plugin_2.12_spark3.2_0.1.0-EXPERIMENTAL-with-dependencies.jar'
import numpy as np
import pandas as pd
from pyspark.sql import *
from pyspark.sql.types import *
from pyspark.sql import functions as f
spark = SparkSession.builder \
.config('spark.jars', package_jar) \
.config('spark.deriver.memory', '8g') \
.enableHiveSupport() \
.getOrCreate()
# Suppresses user warning messages in Python
import warnings
warnings.simplefilter("ignore", UserWarning)
# Suppresses `WARN` messages in JVM
spark.sparkContext.setLogLevel("ERROR")
from repair.api import Scavenger
Scavenger().version()
spark.read.option("header", True).csv("../testdata/adult.csv").createOrReplaceTempView("adult")
spark.table('adult').printSchema()
import altair as alt
charts = []
pdf = spark.table('adult').toPandas()
for c in [c for c in pdf.columns if c != 'tid']:
charts.append(alt.Chart(pdf).mark_bar().encode(x=alt.X(c), y=alt.Y('count()', axis=alt.Axis(title='freq'))).properties(width=300, height=300))
alt.hconcat(*charts)
from repair.detectors import NullErrorDetector, ConstraintErrorDetector
error_detectors = [
ConstraintErrorDetector(constraint_path="../testdata/adult_constraints.txt"),
NullErrorDetector()
]
from repair.model import RepairModel
model = RepairModel().setTableName('adult').setRowId('tid')
noisy_cells_df, noisy_columns = model.setErrorDetectors(error_detectors)._detect_errors('adult', 8, 20)
import altair as alt
pdf = noisy_cells_df.toPandas()
alt.Chart(pdf).mark_bar().encode(x=alt.X('attribute'), y=alt.Y('count()', axis=alt.Axis(title='freq'))).properties(width=400, height=400)
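# Discretize the attributes, then analyze cell domains and pairwise column statistics for the noisy cells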
discretized_table, discretized_columns, distinct_stats = model._discretize_attrs('adult')
discretized_columns
target_columns = list(filter(lambda c: c in discretized_columns, noisy_columns))
target_columns
cell_domain, pairwise_stats = model._analyze_error_cell_domain(noisy_cells_df, discretized_table, [], target_columns, discretized_columns, 20)
import altair as alt
charts = []
for target, cols in pairwise_stats.items():
pdf = pd.DataFrame(cols, columns=[target, 'cor'])
pdf['cor'] = pdf['cor'].astype('float')
charts.append(alt.Chart(pdf).mark_bar().encode(x=alt.X(target), y=alt.Y('cor')).properties(width=200, height=200))
alt.hconcat(*charts)
error_cells_df, weak_labeled_cells_df_opt = model._extract_error_cells(noisy_cells_df, cell_domain, 20, 8)
repair_base_df = model._prepare_repair_base_cells('adult', noisy_cells_df, target_columns, 20, 8)
repair_base_df = model._repair_attrs(weak_labeled_cells_df_opt, repair_base_df)
import altair as alt
charts = []
pdf = repair_base_df.toPandas()
for c in [c for c in pdf.columns if c != 'tid']:
charts.append(alt.Chart(pdf).mark_bar().encode(x=alt.X(c), y=alt.Y('count()', axis=alt.Axis(title='freq'))).properties(width=300, height=300))
alt.hconcat(*charts)
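# Experiment: predict the 'Sex' column from the other attributes (ordinal encoding, t-SNE embedding, Boruta feature selection, random forest)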
target = 'Sex'
pdf = repair_base_df.toPandas()
pdf = pdf.dropna()
X = pdf.drop(['tid', target], axis=1).reset_index(drop=True)
y = pdf[target].reset_index(drop=True)
import category_encoders as ce
se = ce.OrdinalEncoder(handle_unknown='impute')
X = se.fit_transform(X)
X
import altair as alt
pdf = pd.concat([X, y], axis=1)
alt.Chart(pdf).mark_circle().encode(
alt.X(alt.repeat("column"), type='quantitative'),
alt.Y(alt.repeat("row"), type='quantitative'),
color=f'{target}:N'
).properties(width=200, height=200).repeat(row=X.columns.tolist(), column=X.columns.tolist())
# One of non-linear embedding in sklearn
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, random_state=0)
_X = tsne.fit_transform(X)
tsne.kl_divergence_
import altair as alt
_X = pd.DataFrame({'tSNE-X': _X[:, 0], 'tSNE-Y': _X[:, 1], target: y})
alt.Chart(_X).mark_point().encode(x='tSNE-X', y='tSNE-Y', color=f'{target}:N').properties(width=600, height=400).interactive()
from sklearn.ensemble import RandomForestClassifier
from boruta import BorutaPy
rf = RandomForestClassifier(n_jobs=-1, max_depth=5)
rf.fit(X, y)
print('SCORE with ALL Features: %1.2f\n' % rf.score(X, y))
rf = RandomForestClassifier(n_jobs=-1, max_depth=5)
fs = BorutaPy(rf, n_estimators='auto', random_state=0)
fs.fit(X.values, y.values)
selected = fs.support_
print('Selected Features: %s' % ','.join(X.columns[selected]))
X_selected = X[X.columns[selected]]
rf = RandomForestClassifier(n_jobs=-1, max_depth=5)
rf.fit(X_selected, y)
print('SCORE with selected Features: %1.2f' % rf.score(X_selected, y))
```
| github_jupyter |
# Capsule Network
In this notebook I will try to explain and implement a Capsule Network, using MNIST images as input.
To implement a Capsule Network, we first need to understand what capsules are and what advantages they have compared to a convolutional neural network.
### So what are capsules?
* Briefly, capsules are small groups of neurons where each neuron in a capsule represents a property of a particular image part.
* Capsules represent relationships between parts of a whole object by using **dynamic routing** to weight the connections between one layer of capsules and the next, creating strong connections between spatially related object parts (discussed later).
* The output of each capsule is a vector, and this vector has a magnitude and an orientation.
* Magnitude: indicates whether that particular image part is present or not. Basically, we can summarize it as the probability that the part exists (it has to be between 0 and 1).
* Orientation: changes if one of the properties of that particular image part changes.
Let us look at an example to make this clearer.
As shown in the following image, a capsule detects a cat's face. The capsule consists of neurons with properties like position, color, width, etc. We then get an output vector with magnitude 0.9, which means we have 90% confidence that this is a cat's face, and we also get an orientation.

(image from : https://cezannec.github.io/Capsule_Networks/)
But what if these properties change, for example if we flip the cat's face? What will happen? Will it still detect the cat's face?
Yes, it will still detect the cat's face with 90% confidence (magnitude 0.9), but there will be a change in the orientation (theta) to indicate a change in the properties.

(image from: https://cezannec.github.io/Capsule_Networks/ )
### What advantages does it have compared to Convolutional Neural Network(CNN)?
* A CNN looks for key features regardless of their position. As shown in the following image, a CNN will detect the left image as a face, while a capsule network will not, because it checks whether the parts are in the correct positions relative to each other.

(image from:https://kndrck.co/posts/capsule_networks_explained/)
* A capsule network is more robust to affine transformations in the data. If translation or rotation is applied to the test data, a trained capsule network will perform better and give higher accuracy than a normal CNN.
# Model Architecture
The capsule network consists of two main parts:
* A convolutional encoder.
* A fully connected, linear decoder.

(image from :[Hinton's paper(capsule networks orignal paper)](https://arxiv.org/pdf/1710.09829.pdf) )
In this explanation and implementation I will follow the architecture from [Hinton's paper (the original capsule networks paper)](https://arxiv.org/pdf/1710.09829.pdf).
# 1)Encoder
The encoder consists of three main layers, shown in the following image, plus the input layer, which is a 28x28 MNIST image.
Please notice the difference between this image and the previous one: in the previous image the last layer is the decoder.

## A)The convolutional layer
In Hinton's paper they apply a kernel of size 9x9 to the input layer. This kernel has a depth of 256, stride = 1 and padding = 0. This gives us an output of dimension 20x20.
**Note** :
you can calculate the output dimension with the equation output = [(w - k + 2p)/s] + 1 (a quick check with the paper's numbers is shown after the lists below), where:
- w is the input size
- k is the kernel size
- p is padding
- s is stride
So to clarify this more:
- The input's dimension is (28, 28, 1), where 28x28 is the input size and 1 is the number of channels.
- The kernel's dimension is (9, 9, 1, 256), where 9x9 is the kernel size, 1 is the number of channels and 256 is the depth of the kernel.
- The output's dimension is (20, 20, 256), where 20x20 is the output size and 256 is the stack of filtered images.
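As a quick sanity check, here is a small snippet that plugs these numbers into the formula:
```
# Output size formula: output = (w - k + 2p)/s + 1
def conv_output_size(w, k, p, s):
    return (w - k + 2 * p) // s + 1

print(conv_output_size(28, 9, 0, 1))  # 20 -> the 20x20 output of the first conv layer
print(conv_output_size(20, 9, 0, 2))  # 6  -> the 6x6 output of the primary capsule conv (stride 2)
```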
I think we are ready to start implementing the code now, so let us start by obtaining the MNIST data and creating our DataLoaders for training and testing purposes.
```
# import resources
import numpy as np
import torch
# random seed (for reproducibility)
seed = 1
# set random seed for numpy
np.random.seed(seed)
# set random seed for pytorch
torch.manual_seed(seed)
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to Tensors
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data,
batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data,
batch_size=batch_size,
num_workers=num_workers)
```
The next step is to create the convolutional layer as we explained:
```
import torch.nn as nn
import torch.nn.functional as F
class ConvLayer(nn.Module):
def __init__(self, in_channels=1, out_channels=256):
'''Constructs the ConvLayer with a specified input and output size.
These sizes have default values taken from the paper.
param input_channel: input depth of an image, default value = 1
param output_channel: output depth of the convolutional layer, default value = 256
'''
super(ConvLayer, self).__init__()
# defining a convolutional layer of the specified size
self.conv = nn.Conv2d(in_channels, out_channels,
kernel_size=9, stride=1, padding=0)
def forward(self, x):
# applying a ReLu activation to the outputs of the conv layer
output = F.relu(self.conv(x)) # we will have dimensions (batch_size, 256, 20, 20)
return output
```
## B)Primary capsules
This layer is tricky, but I will try to simplify it as much as I can.
We would like to convolve the first layer into a new layer with 8 primary capsules.
To do so we will follow Hinton's paper steps:
- The first step is to convolve our first convolutional layer, which has a dimension of (20, 20, 256), with a kernel of dimension (9, 9, 256, 256), in which 9 is the kernel size, the first 256 is the number of channels from the first layer and the second 256 is the number of filters (the depth of the kernel). We get an output with a dimension of (6, 6, 256).
- The second step is to reshape this output to (6, 6, 8, 32), where 8 is the number of capsules and 32 is the depth of each capsule.
- Now the output of each capsule has a dimension of (6, 6, 32), and we reshape it to (6x6x32, 1) = (1152, 1) for each capsule.
- The final step is to squash the output so it has a magnitude between 0 and 1, as we discussed earlier, using the following equation:

where $v_j$ is the normalized output vector of capsule $j$ and $s_j$ is the total input to capsule $j$ (the weighted sum over all the output vectors from the capsules in the layer below).
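For reference, the squash function shown in the image can be written as
$$v_j = \frac{\|s_j\|^2}{1 + \|s_j\|^2} \cdot \frac{s_j}{\|s_j\|}$$
which is exactly what the `squash` method in the code below computes.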
We will use an `nn.ModuleList` container to loop over each capsule we have.
```
class PrimaryCaps(nn.Module):
def __init__(self, num_capsules=8, in_channels=256, out_channels=32):
'''Constructs a list of convolutional layers to be used in
creating capsule output vectors.
param num_capsules: number of capsules to create
param in_channels: input depth of features, default value = 256
param out_channels: output depth of the convolutional layers, default value = 32
'''
super(PrimaryCaps, self).__init__()
# creating a list of convolutional layers for each capsule I want to create
# all capsules have a conv layer with the same parameters
self.capsules = nn.ModuleList([
nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=9, stride=2, padding=0)
for _ in range(num_capsules)])
def forward(self, x):
'''Defines the feedforward behavior.
param x: the input; features from a convolutional layer
return: a set of normalized, capsule output vectors
'''
# get batch size of inputs
batch_size = x.size(0)
# reshape convolutional layer outputs to be (batch_size, vector_dim=1152, 1)
u = [capsule(x).view(batch_size, 32 * 6 * 6, 1) for capsule in self.capsules]
# stack up output vectors, u, one for each capsule
u = torch.cat(u, dim=-1)
# squashing the stack of vectors
u_squash = self.squash(u)
return u_squash
def squash(self, input_tensor):
'''Squashes an input Tensor so it has a magnitude between 0-1.
param input_tensor: a stack of capsule inputs, s_j
return: a stack of normalized, capsule output vectors, v_j
'''
squared_norm = (input_tensor ** 2).sum(dim=-1, keepdim=True)
scale = squared_norm / (1 + squared_norm) # normalization coeff
output_tensor = scale * input_tensor / torch.sqrt(squared_norm)
return output_tensor
```
## C)Digit capsules
As we have 10 digit classes, from 0 to 9, this layer will have 10 capsules, one for each digit.
Each capsule takes as input a batch of 1152-dimensional vectors, while the output is a 16-dimensional vector per digit capsule.
### Dynamic Routing
Dynamic routing is used to find the best connections between the child-layer capsules and the possible parent capsules. The main component of dynamic routing is the capsule routing procedure.
To make it easier, we can think of capsule routing as something like backpropagation: we use it to obtain the probability that a certain capsule's output should go to a particular parent capsule in the next layer.
As shown in the following figure, the first child capsule is connected to $s_{1}$, which is the first possible parent capsule, and to $s_{2}$, which is the second possible parent capsule. In the beginning the coupling coefficients have equal values (for example, both logits start at zero); then we apply dynamic routing to adjust them. We might find, for example, that the coupling coefficient connected with $s_{1}$ is 0.9 and the coupling coefficient connected with $s_{2}$ is 0.1, which means the first child capsule's output should mostly go to the first parent capsule in the next layer.

**Notes**
- Across all connections between one child capsule and all possible parent capsules, the coupling coefficients should sum to 1. This means that $c_{11}$ + $c_{12}$ = 1.
- As shown in the following figure, $s_{1}$ is the total input of the parent capsule (the weighted sum over all the output vectors from the capsules in the layer below).
- To check the similarity between the total input $s_{1}$ and each vector, we calculate the dot product between them. In this example we find that $s_{1}$ is more similar to $u_{1}$ than to $u_{2}$ or $u_{3}$; this similarity is called the agreement.

### Dynamic Routing Algorithm
The following algorithm is from [Hinton's paper (the original capsule networks paper)](https://arxiv.org/pdf/1710.09829.pdf).

We can explain the algorithm as follows:
- First we initialize the logits $b_{ij}$ of the softmax function to zero.
- Calculate the coupling coefficients using the softmax equation:
$$c_{ij} = \frac{e^{\ b_{ij}}}{\sum_{k}\ {e^{\ b_{ik}}}} $$
- Calculate the total capsule input $s_{j}$.
**Note**
- $ s_j = \sum_i{c_{ij} \ \hat{u}_i}$
- $ \hat{u} = Wu $, where $W$ is the weight matrix and $u$ is the input vector
- Squash to get a normalized output vector $v_{j}$.
- The last step is composed of two parts: we calculate the agreement and the new $b_{ij}$. The agreement is the similarity we discussed before, which is the dot product between the prediction vector $\hat{u}$ and the parent capsule's output vector $v_{j}$. The second part is to update $b_{ij}$:
$$\hat{u} = W u \qquad a = v \cdot \hat{u} \qquad b_{ij} = b_{ij} + a $$
```
def softmax(input_tensor, dim=1): # softmax along a chosen dimension; used to compute the coupling coefficients c_ij
# transpose input
transposed_input = input_tensor.transpose(dim, len(input_tensor.size()) - 1)
# calculate softmax
softmaxed_output = F.softmax(transposed_input.contiguous().view(-1, transposed_input.size(-1)), dim=-1)
# un-transpose result
return softmaxed_output.view(*transposed_input.size()).transpose(dim, len(input_tensor.size()) - 1)
# dynamic routing
def dynamic_routing(b_ij, u_hat, squash, routing_iterations=3):
'''Performs dynamic routing between two capsule layers.
param b_ij: initial log probabilities that capsule i should be coupled to capsule j
param u_hat: input, weighted capsule vectors, W u
param squash: given, normalizing squash function
param routing_iterations: number of times to update coupling coefficients
return: v_j, output capsule vectors
'''
# update b_ij, c_ij for number of routing iterations
for iteration in range(routing_iterations):
# softmax calculation of coupling coefficients, c_ij
c_ij = softmax(b_ij, dim=2)
# calculating total capsule inputs, s_j = sum(c_ij*u_hat)
s_j = (c_ij * u_hat).sum(dim=2, keepdim=True)
# squashing to get a normalized vector output, v_j
v_j = squash(s_j)
# if not on the last iteration, calculate agreement and new b_ij
if iteration < routing_iterations - 1:
# agreement
a_ij = (u_hat * v_j).sum(dim=-1, keepdim=True)
# new b_ij
b_ij = b_ij + a_ij
return v_j # return latest v_j
```
After implementing dynamic routing we are ready to implement the DigitCaps class, which consists of the following:
- This layer is composed of 10 "digit" capsules, one for each of our digit classes 0-9.
- Each capsule takes, as input, a batch of 1152-dimensional vectors produced by our 8 primary capsules, above.
- Each of these 10 capsules is responsible for producing a 16-dimensional output vector.
- We will initialize the weight matrix randomly.
```
# it will also be relevant, in this model, to see if I can train on gpu
TRAIN_ON_GPU = torch.cuda.is_available()
if(TRAIN_ON_GPU):
print('Training on GPU!')
else:
print('Only CPU available')
class DigitCaps(nn.Module):
def __init__(self, num_capsules=10, previous_layer_nodes=32*6*6,
in_channels=8, out_channels=16):
'''Constructs an initial weight matrix, W, and sets class variables.
param num_capsules: number of capsules to create
param previous_layer_nodes: dimension of input capsule vector, default value = 1152
param in_channels: number of capsules in previous layer, default value = 8
param out_channels: dimensions of output capsule vector, default value = 16
'''
super(DigitCaps, self).__init__()
# setting class variables
self.num_capsules = num_capsules
self.previous_layer_nodes = previous_layer_nodes # vector input (dim=1152)
self.in_channels = in_channels # previous layer's number of capsules
# starting out with a randomly initialized weight matrix, W
# these will be the weights connecting the PrimaryCaps and DigitCaps layers
self.W = nn.Parameter(torch.randn(num_capsules, previous_layer_nodes,
in_channels, out_channels))
def forward(self, u):
'''Defines the feedforward behavior.
param u: the input; vectors from the previous PrimaryCaps layer
return: a set of normalized, capsule output vectors
'''
# adding batch_size dims and stacking all u vectors
u = u[None, :, :, None, :]
# 4D weight matrix
W = self.W[:, None, :, :, :]
# calculating u_hat = W*u
u_hat = torch.matmul(u, W)
# getting the correct size of b_ij
# setting them all to 0, initially
b_ij = torch.zeros(*u_hat.size())
# moving b_ij to GPU, if available
if TRAIN_ON_GPU:
b_ij = b_ij.cuda()
# update coupling coefficients and calculate v_j
v_j = dynamic_routing(b_ij, u_hat, self.squash, routing_iterations=3)
return v_j # return final vector outputs
def squash(self, input_tensor):
'''Squashes an input Tensor so it has a magnitude between 0-1.
param input_tensor: a stack of capsule inputs, s_j
return: a stack of normalized, capsule output vectors, v_j
'''
# same squash function as before
squared_norm = (input_tensor ** 2).sum(dim=-1, keepdim=True)
scale = squared_norm / (1 + squared_norm) # normalization coeff
output_tensor = scale * input_tensor / torch.sqrt(squared_norm)
return output_tensor
```
# 2)Decoder
As shown in the following figure from [Hinton's paper (the original capsule networks paper)](https://arxiv.org/pdf/1710.09829.pdf), the decoder is made of three fully-connected, linear layers. The first layer sees the ten 16-dimensional output vectors from the digit capsule layer and produces hidden_dim = 512 outputs. The next hidden layer has 1024 units, and the third and final linear layer produces an output of 784 values, which can be reshaped into a 28x28 image!

```
class Decoder(nn.Module):
def __init__(self, input_vector_length=16, input_capsules=10, hidden_dim=512):
'''Constructs a series of linear layers + activations.
param input_vector_length: dimension of input capsule vector, default value = 16
param input_capsules: number of capsules in previous layer, default value = 10
param hidden_dim: dimensions of hidden layers, default value = 512
'''
super(Decoder, self).__init__()
# calculate input_dim
input_dim = input_vector_length * input_capsules
# define linear layers + activations
self.linear_layers = nn.Sequential(
nn.Linear(input_dim, hidden_dim), # first hidden layer
nn.ReLU(inplace=True),
nn.Linear(hidden_dim, hidden_dim*2), # second, twice as deep
nn.ReLU(inplace=True),
nn.Linear(hidden_dim*2, 28*28), # can be reshaped into 28*28 image
nn.Sigmoid() # sigmoid activation to get output pixel values in a range from 0-1
)
def forward(self, x):
'''Defines the feedforward behavior.
param x: the input; vectors from the previous DigitCaps layer
return: two things, reconstructed images and the class scores, y
'''
classes = (x ** 2).sum(dim=-1) ** 0.5
classes = F.softmax(classes, dim=-1)
# find the capsule with the maximum vector length
# here, vector length indicates the probability of a class' existence
_, max_length_indices = classes.max(dim=1)
# create a sparse class matrix
sparse_matrix = torch.eye(10) # 10 is the number of classes
if TRAIN_ON_GPU:
sparse_matrix = sparse_matrix.cuda()
# get the class scores from the "correct" capsule
y = sparse_matrix.index_select(dim=0, index=max_length_indices.data)
# create reconstructed pixels
x = x * y[:, :, None]
# flatten image into a vector shape (batch_size, vector_dim)
flattened_x = x.contiguous().view(x.size(0), -1)
# create reconstructed image vectors
reconstructions = self.linear_layers(flattened_x)
# return reconstructions and the class scores, y
return reconstructions, y
```
Now let us collect all these layers (the classes we have created, i.e. ConvLayer, PrimaryCaps, DigitCaps, Decoder) in one class called CapsuleNetwork.
```
class CapsuleNetwork(nn.Module):
def __init__(self):
'''Constructs a complete Capsule Network.'''
super(CapsuleNetwork, self).__init__()
self.conv_layer = ConvLayer()
self.primary_capsules = PrimaryCaps()
self.digit_capsules = DigitCaps()
self.decoder = Decoder()
def forward(self, images):
'''Defines the feedforward behavior.
param images: the original MNIST image input data
return: output of DigitCaps layer, reconstructed images, class scores
'''
primary_caps_output = self.primary_capsules(self.conv_layer(images))
caps_output = self.digit_capsules(primary_caps_output).squeeze().transpose(0,1)
reconstructions, y = self.decoder(caps_output)
return caps_output, reconstructions, y
```
Let us now instantiate the model and print it.
```
# instantiate and print net
capsule_net = CapsuleNetwork()
print(capsule_net)
# move model to GPU, if available
if TRAIN_ON_GPU:
capsule_net = capsule_net.cuda()
```
# Loss
The loss for a capsule network is a weighted combination of two losses:
1. Reconstruction loss
2. Margin loss
### Reconstruction Loss
- It measures how different the reconstructed image we get from the decoder is from the original input image.
- It is calculated using the mean squared error, which is `nn.MSELoss` in PyTorch.
- In [Hinton's paper (the original capsule networks paper)](https://arxiv.org/pdf/1710.09829.pdf) the reconstruction loss is weighted by a coefficient of 0.0005, so it doesn't overpower the margin loss.
### Margin Loss
```
from IPython.display import Image
Image(filename='images/margin_loss.png')
```
Margin loss is a classification loss (we can think of it as playing the role of cross entropy) based on the lengths of the output vectors coming from the DigitCaps layer.
Let us elaborate with our example. Say we have an output vector $x$ coming from the DigitCaps layer; this output vector represents a certain digit from 0 to 9, since we are using MNIST. We take the length of the corresponding digit capsule's output vector, $v_k = \sqrt{x^2}$. The correct capsule should have an output vector with magnitude greater than or equal to 0.9 ($v_k \geq 0.9$), while the other capsules should have outputs with magnitude smaller than or equal to 0.1 ($v_k \leq 0.1$).
So, if we have an input image of a 0, then the "correct," zero-detecting, digit capsule should output a vector of magnitude 0.9 or greater! For all the other digits (1-9, in this example) the corresponding digit capsule output vectors should have a magnitude that is 0.1 or less.
The margin loss sums both sides (the 0.9 term and the 0.1 term) for each digit capsule $k$,
where $T_k = 1$ if a digit of class $k$ is present,
and $m^{+}$ = 0.9 and $m^{-}$ = 0.1. The λ down-weighting
of the loss for absent digit classes stops the initial learning from shrinking the lengths of the activity vectors of all the digit capsules. In the paper they have chosen λ = 0.5.
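Written out in the paper's notation, the margin loss for digit capsule $k$ is
$$L_k = T_k \, \max(0,\, m^{+} - \|v_k\|)^2 + \lambda \,(1 - T_k)\, \max(0,\, \|v_k\| - m^{-})^2$$
(the implementation below sums the corresponding un-squared terms).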
**Note** :
The total loss is simply the sum of the losses of all digit capsules.
```
class CapsuleLoss(nn.Module):
def __init__(self):
'''Constructs a CapsuleLoss module.'''
super(CapsuleLoss, self).__init__()
self.reconstruction_loss = nn.MSELoss(reduction='sum') # cumulative loss, equiv to size_average=False
def forward(self, x, labels, images, reconstructions):
'''Defines how the loss compares inputs.
param x: digit capsule outputs
param labels: one-hot encoded target class labels
param images: the original MNIST image input data
param reconstructions: reconstructed MNIST image data
return: weighted margin and reconstruction loss, averaged over a batch
'''
batch_size = x.size(0)
## calculate the margin loss ##
# get magnitude of digit capsule vectors, v_c
v_c = torch.sqrt((x**2).sum(dim=2, keepdim=True))
# calculate "correct" and incorrect loss
left = F.relu(0.9 - v_c).view(batch_size, -1)
right = F.relu(v_c - 0.1).view(batch_size, -1)
# sum the losses, with a lambda = 0.5
margin_loss = labels * left + 0.5 * (1. - labels) * right
margin_loss = margin_loss.sum()
## calculate the reconstruction loss ##
images = images.view(reconstructions.size()[0], -1)
reconstruction_loss = self.reconstruction_loss(reconstructions, images)
# return a weighted, summed loss, averaged over a batch size
return (margin_loss + 0.0005 * reconstruction_loss) / images.size(0)
```
Now we instantiate the custom loss class we have implemented, and we will use the Adam optimizer as in the paper.
```
import torch.optim as optim
# custom loss
criterion = CapsuleLoss()
# Adam optimizer with default params
optimizer = optim.Adam(capsule_net.parameters())
```
# Train the network
The usual steps for training on a batch of data are:
1. Clear the gradients of all optimized variables, by making them zero.
2. Forward pass: compute predicted outputs by passing inputs to the model
3. Calculate the loss.
4. Backward pass: compute gradient of the loss with respect to model parameters
5. Perform a single optimization step (parameter update)
6. Update average training loss
```
def train(capsule_net, criterion, optimizer,
n_epochs, print_every=300):
'''Trains a capsule network and prints out training batch loss statistics.
param capsule_net: capsule network to train
param criterion: capsule loss function
param optimizer: optimizer for updating network weights
param n_epochs: number of epochs to train for
param print_every: batches to print and save training loss, default = 300
return: list of recorded training losses
'''
# track training loss over time
losses = []
# one epoch = one pass over all training data
for epoch in range(1, n_epochs+1):
# initialize training loss
train_loss = 0.0
capsule_net.train() # set to train mode
# get batches of training image data and targets
for batch_i, (images, target) in enumerate(train_loader):
# reshape and get target class
target = torch.eye(10).index_select(dim=0, index=target)
if TRAIN_ON_GPU:
images, target = images.cuda(), target.cuda()
# zero out gradients
optimizer.zero_grad()
# get model outputs
caps_output, reconstructions, y = capsule_net(images)
# calculate loss
loss = criterion(caps_output, target, images, reconstructions)
# perform backpropagation and optimization
loss.backward()
optimizer.step()
train_loss += loss.item() # accumulated training loss
# print and record training stats
if batch_i != 0 and batch_i % print_every == 0:
avg_train_loss = train_loss/print_every
losses.append(avg_train_loss)
print('Epoch: {} \tTraining Loss: {:.8f}'.format(epoch, avg_train_loss))
train_loss = 0 # reset accumulated training loss
return losses
# training for 5 epochs
n_epochs = 5
losses = train(capsule_net, criterion, optimizer, n_epochs=n_epochs)
```
Now let us plot the training loss to get a better feeling for how the loss behaves:
```
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(losses)
plt.title("Training Loss")
plt.show()
```
# Test the trained network
Test the trained network on unseen data:
```
def test(capsule_net, test_loader):
'''Prints out test statistics for a given capsule net.
param capsule_net: trained capsule network
param test_loader: test dataloader
return: returns last batch of test image data and corresponding reconstructions
'''
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
test_loss = 0 # loss tracking
capsule_net.eval() # eval mode
for batch_i, (images, target) in enumerate(test_loader):
target = torch.eye(10).index_select(dim=0, index=target)
batch_size = images.size(0)
if TRAIN_ON_GPU:
images, target = images.cuda(), target.cuda()
# forward pass: compute predicted outputs by passing inputs to the model
caps_output, reconstructions, y = capsule_net(images)
# calculate the loss
loss = criterion(caps_output, target, images, reconstructions)
# update average test loss
test_loss += loss.item()
# convert output probabilities to predicted class
_, pred = torch.max(y.data.cpu(), 1)
_, target_shape = torch.max(target.data.cpu(), 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target_shape.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(batch_size):
label = target_shape.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# avg test loss
avg_test_loss = test_loss/len(test_loader)
print('Test Loss: {:.8f}\n'.format(avg_test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
# return last batch of capsule vectors, images, reconstructions
return caps_output, images, reconstructions
# call test function and get reconstructed images
caps_output, images, reconstructions = test(capsule_net, test_loader)
```
Now it is time to display the reconstructions:
```
def display_images(images, reconstructions):
'''Plot one row of original MNIST images and another row (below)
of their reconstructions.'''
# convert to numpy images
images = images.data.cpu().numpy()
reconstructions = reconstructions.view(-1, 1, 28, 28)
reconstructions = reconstructions.data.cpu().numpy()
# plot the first ten input images and then reconstructed images
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(26,5))
# input images on top row, reconstructions on bottom
for images, row in zip([images, reconstructions], axes):
for img, ax in zip(images, row):
ax.imshow(np.squeeze(img), cmap='gray')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display original and reconstructed images, in rows
display_images(images, reconstructions)
```
| github_jupyter |
# Scenario Analysis: Pop Up Shop

Kürschner (talk) 17:51, 1 December 2020 (UTC), CC0, via Wikimedia Commons
```
# install Pyomo and solvers for Google Colab
import sys
if "google.colab" in sys.modules:
!wget -N -q https://raw.githubusercontent.com/jckantor/MO-book/main/tools/install_on_colab.py
%run install_on_colab.py
```
## The problem
There is an opportunity to operate a pop-up shop to sell a unique commemorative item for events held at a famous location. The items cost 12 € each and will sell for 40 €. Unsold items can be returned to the supplier at a value of only 2 € due to their commemorative nature.
| Parameter | Symbol | Value |
| :---: | :---: | :---: |
| sales price | $r$ | 40 € |
| unit cost | $c$ | 12 € |
| salvage value | $w$ | 2 € |
Profit will increase with sales. Demand for these items, however, will be high only if the weather is good. Historical data suggests the following scenarios.
| Scenario ($s$) | Demand ($d_s$) | Probability ($p_s$) |
| :---: | :-----: | :----------: |
| Sunny Skies | 650 | 0.10 |
| Good Weather | 400 | 0.60 |
| Poor Weather | 200 | 0.30 |
The problem is to determine how many items to order for the pop-up shop.
The dilemma is that the weather won't be known until after the order is placed. Ordering enough items to meet demand for a good weather day results in a financial penalty on returned goods if the weather is poor. But ordering just enough items to satisfy demand on a poor weather day leaves "money on the table" if the weather is good.
How many items should be ordered for sale?
## Expected value for the mean scenario (EVM)
A naive solution to this problem is to place an order equal to the expected demand. The expected demand is given by
$$
\begin{align*}
\mathbb E[D] & = \sum_{s\in S} p_s d_s
\end{align*}
$$
Choosing an order size $x = \mathbb E[d]$ results in an expected profit we call the **expected value of the mean scenario (EVM)**.
Variable $y_s$ is the actual number of items sold if scenario $s$ should occur. The number sold is the lesser of the demand $d_s$ and the order size $x$.
$$
\begin{align*}
y_s & = \min(d_s, x) & \forall s \in S
\end{align*}
$$
Any unsold inventory $x - y_s$ remaining after the event will be sold at the salvage price $w$. Taking into account the revenue from sales $r y_s$, the salvage value of the unsold inventory $w(x - y_s)$, and the cost of the order $c x$, the profit $f_s$ for scenario $s$ is given by
$$
\begin{align*}
f_s & = r y_s + w (x - y_s) - c x & \forall s \in S
\end{align*}
$$
The average or expected profit is given by
$$
\begin{align*}
\text{EVM} = \mathbb E[f] & = \sum_{s\in S} p_s f_s
\end{align*}
$$
These calculations can be executed using operations on the pandas dataframe. Let's begin by calculating the expected demand.
Below we create a pandas DataFrame object to store the scenario data.
```
import numpy as np
import pandas as pd
# price information
r = 40
c = 12
w = 2
# scenario information
scenarios = {
"sunny skies" : {"probability": 0.10, "demand": 650},
"good weather": {"probability": 0.60, "demand": 400},
"poor weather": {"probability": 0.30, "demand": 200},
}
df = pd.DataFrame.from_dict(scenarios).T
display(df)
expected_demand = sum(df["probability"] * df["demand"])
print(f"Expected demand = {expected_demand}")
```
Subsequent calculations can be done directly with the pandas dataframe holding the scenario data.
```
df["order"] = expected_demand
df["sold"] = df[["demand", "order"]].min(axis=1)
df["salvage"] = df["order"] - df["sold"]
df["profit"] = r * df["sold"] + w * df["salvage"] - c * df["order"]
EVM = sum(df["probability"] * df["profit"])
print(f"Mean demand = {expected_demand}")
print(f"Expected value of the mean demand (EVM) = {EVM}")
display(df)
```
## Expected value of the stochastic solution (EVSS)
The optimization problem is to find the order size $x$ that maximizes expected profit subject to operational constraints on the decision variables. The variables $x$ and $y_s$ are non-negative integers, while $f_s$ is a real number that can take either positive and negative values. The number of goods sold in scenario $s$ has to be less than the order size $x$ and customer demand $d_s$.
The problem to be solved is
$$
\begin{align*}
\text{EV} = & \max_{x, y_s} \mathbb E[F] = \sum_{s\in S} p_s f_s \\
\text{subject to:} \\
f_s & = r y_s + w(x - y_s) - c x & \forall s \in S\\
y_s & \leq x & \forall s \in S \\
y_s & \leq d_s & \forall s \in S
\end{align*}
$$
where $S$ is the set of all scenarios under consideration.
```
import pyomo.environ as pyo
import pandas as pd
# price information
r = 40
c = 12
w = 2
# scenario information
scenarios = {
"sunny skies" : {"demand": 650, "probability": 0.1},
"good weather": {"demand": 400, "probability": 0.6},
"poor weather": {"demand": 200, "probability": 0.3},
}
# create model instance
m = pyo.ConcreteModel('Pop-up Shop')
# set of scenarios
m.S = pyo.Set(initialize=scenarios.keys())
# decision variables
m.x = pyo.Var(domain=pyo.NonNegativeIntegers)
m.y = pyo.Var(m.S, domain=pyo.NonNegativeIntegers)
m.f = pyo.Var(m.S, domain=pyo.Reals)
# objective
@m.Objective(sense=pyo.maximize)
def EV(m):
return sum([scenarios[s]["probability"]*m.f[s] for s in m.S])
# constraints
@m.Constraint(m.S)
def profit(m, s):
return m.f[s] == r*m.y[s] + w*(m.x - m.y[s]) - c*m.x
@m.Constraint(m.S)
def sales_less_than_order(m, s):
return m.y[s] <= m.x
@m.Constraint(m.S)
def sales_less_than_demand(m, s):
return m.y[s] <= scenarios[s]["demand"]
# solve
solver = pyo.SolverFactory('glpk')
results = solver.solve(m)
# display solution using Pandas
print("Solver Termination Condition:", results.solver.termination_condition)
print("Expected Profit:", m.EV())
print()
for s in m.S:
scenarios[s]["order"] = m.x()
scenarios[s]["sold"] = m.y[s]()
scenarios[s]["salvage"] = m.x() - m.y[s]()
scenarios[s]["profit"] = m.f[s]()
df = pd.DataFrame.from_dict(scenarios).T
display(df)
```
Optimizing over all scenarios provides an expected profit of 8,920 €, an increase of 581 € over the base case of simply ordering the expected number of items sold. The new solution places a larger order. In poor weather conditions there will be more returns and lower profit, but this is more than compensated by the increased profits in good weather conditions.
The additional value that results from solving this planning problem is called the **Value of the Stochastic Solution (VSS)**. The value of the stochastic solution is the additional profit compared to ordering to meet the expected demand. In this case,
$$\text{VSS} = \text{EV} - \text{EVM} = 8,920 - 8,339 = 581$$
## Expected value with perfect information (EVPI)
Maximizing expected profit requires the size of the order to be decided before knowing what scenario will unfold. The decision for $x$ has to be made "here and now" with probabilistic information about the future, but without specific information on which future will actually transpire.
Nevertheless, we can perform the hypothetical calculation of what profit would be realized if we could know the future. We are still subject to the variability of weather; what is different is that we know what the weather will be at the time the order is placed.
The resulting value for the expected profit is called the **Expected Value of Perfect Information (EVPI)**. The difference EVPI - EV is the extra profit due to having perfect knowledge of the future.
To compute the expected profit with perfect information, we let the order variable $x$ be indexed by the subsequent scenario that will unfold. Given decision variable $x_s$, the model for EVPI becomes
$$
\begin{align*}
\text{EVPI} = & \max_{x_s, y_s} \mathbb E[f] = \sum_{s\in S} p_s f_s \\
\text{subject to:} \\
f_s & = r y_s + w(x_s - y_s) - c x_s & \forall s \in S\\
y_s & \leq x_s & \forall s \in S \\
y_s & \leq d_s & \forall s \in S
\end{align*}
$$
The following implementation is a variation of the prior cell.
```
import pyomo.environ as pyo
import pandas as pd
# price information
r = 40
c = 12
w = 2
# scenario information
scenarios = {
"sunny skies" : {"demand": 650, "probability": 0.1},
"good weather": {"demand": 400, "probability": 0.6},
"poor weather": {"demand": 200, "probability": 0.3},
}
# create model instance
m = pyo.ConcreteModel('Pop-up Shop')
# set of scenarios
m.S = pyo.Set(initialize=scenarios.keys())
# decision variables
m.x = pyo.Var(m.S, domain=pyo.NonNegativeIntegers)
m.y = pyo.Var(m.S, domain=pyo.NonNegativeIntegers)
m.f = pyo.Var(m.S, domain=pyo.Reals)
# objective
@m.Objective(sense=pyo.maximize)
def EV(m):
return sum([scenarios[s]["probability"]*m.f[s] for s in m.S])
# constraints
@m.Constraint(m.S)
def profit(m, s):
return m.f[s] == r*m.y[s] + w*(m.x[s] - m.y[s]) - c*m.x[s]
@m.Constraint(m.S)
def sales_less_than_order(m, s):
return m.y[s] <= m.x[s]
@m.Constraint(m.S)
def sales_less_than_demand(m, s):
return m.y[s] <= scenarios[s]["demand"]
# solve
solver = pyo.SolverFactory('glpk')
results = solver.solve(m)
# display solution using Pandas
print("Solver Termination Condition:", results.solver.termination_condition)
print("Expected Profit:", m.EV())
print()
for s in m.S:
scenarios[s]["order"] = m.x[s]()
scenarios[s]["sold"] = m.y[s]()
scenarios[s]["salvage"] = m.x[s]() - m.y[s]()
scenarios[s]["profit"] = m.f[s]()
df = pd.DataFrame.from_dict(scenarios).T
display(df)
```
## Summary
To summarize, we have computed three different solutions to the problem of order size:
* The expected value of the mean solution (EVM) is the expected profit resulting from ordering the number of items expected to be sold across all scenarios.
* The expected value of the stochastic solution (EVSS) is the expected profit found by solving a two-stage optimization problem where the order size is the "here and now" decision, made without specific knowledge of which future scenario will transpire.
* The expected value of perfect information (EVPI) is the result of a hypothetical case where knowledge of the future scenario was somehow available when the order had to be placed.
For this example we found
| Solution | Value (€) |
| :------ | ----: |
| Expected Value of the Mean Solution (EVM) | 8,339.0 |
| Expected Value of the Stochastic Solution (EVSS) | 8,920.0 |
| Expected Value of Perfect Information (EVPI) | 10,220.0 |
These results verify our expectation that
$$
\begin{align*}
EVM \leq EVSS \leq EVPI
\end{align*}
$$
The value of the stochastic solution
$$
\begin{align*}
VSS = EVSS - EVM = 581
\end{align*}
$$
The value of perfect information
$$
\begin{align*}
VPI = EVPI - EVSS = 1,300
\end{align*}
$$
As one might expect, there is a cost that results from lack of knowledge about an uncertain future.
| github_jupyter |
```
!wget --no-check-certificate \
https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip \
-O cats_and_dogs_filtered.zip
! unzip cats_and_dogs_filtered.zip
import keras,os
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D , Flatten
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
trdata = ImageDataGenerator()
traindata = trdata.flow_from_directory(directory="cats_and_dogs_filtered/train",target_size=(224,224))
tsdata = ImageDataGenerator()
testdata = tsdata.flow_from_directory(directory="cats_and_dogs_filtered/validation", target_size=(224,224))
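# Build a VGG16-style architecture from scratch: 13 convolutional layers + 3 fully-connected layers, ending in a 2-class softmax (cat vs dog)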
model = Sequential()
model.add(Conv2D(input_shape=(224,224,3),filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(Conv2D(filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Flatten())
model.add(Dense(units=4096,activation="relu"))
model.add(Dense(units=4096,activation="relu"))
model.add(Dense(units=2, activation="softmax"))
from keras.optimizers import Adam
opt = Adam(lr=0.001)
model.compile(optimizer=opt, loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
model.summary()
from keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=20, verbose=1, mode='auto')
hist = model.fit_generator(steps_per_epoch=100,generator=traindata, validation_data= testdata, validation_steps=10,epochs=100,callbacks=[checkpoint,early])
import matplotlib.pyplot as plt
plt.plot(hist.history["acc"])
plt.plot(hist.history['val_acc'])
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title("model accuracy")
plt.ylabel("Accuracy")
plt.xlabel("Epoch")
plt.legend(["Accuracy","Validation Accuracy","loss","Validation Loss"])
plt.show()
from keras.preprocessing import image
img = image.load_img("Pomeranian_01.jpeg",target_size=(224,224))
img = np.asarray(img)
plt.imshow(img)
img = np.expand_dims(img, axis=0)
from keras.models import load_model
saved_model = load_model("vgg16_1.h5")
output = saved_model.predict(img)
if output[0][0] > output[0][1]:
print("cat")
else:
print('dog')
```
| github_jupyter |
<!-- Copyright 2015 Google Inc. All rights reserved. -->
<!-- Licensed under the Apache License, Version 2.0 (the "License"); -->
<!-- you may not use this file except in compliance with the License. -->
<!-- You may obtain a copy of the License at -->
<!-- http://www.apache.org/licenses/LICENSE-2.0 -->
<!-- Unless required by applicable law or agreed to in writing, software -->
<!-- distributed under the License is distributed on an "AS IS" BASIS, -->
<!-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -->
<!-- See the License for the specific language governing permissions and -->
<!-- limitations under the License. -->
# Getting started with the Google Genomics API
In this notebook we'll cover how to make authenticated requests to the [Google Genomics API](https://cloud.google.com/genomics/reference/rest/).
----
NOTE:
* If you're new to notebooks, or want to check out additional samples, check out the full [list](../) of general notebooks.
* For additional Genomics samples, check out the full [list](./) of Genomics notebooks.
## Setup
### Install Python libraries
We'll be using the [Google Python API client](https://github.com/google/google-api-python-client) for interacting with the Genomics API. We can install this library, or any other 3rd-party Python library from the [Python Package Index (PyPI)](https://pypi.python.org/pypi), using the `pip` package manager.
There are [50+ Google APIs](http://api-python-client-doc.appspot.com/) that you can work against with the Google Python API Client, but we'll focus on the Genomics API in this notebook.
```
!pip install --upgrade google-api-python-client
```
### Create an Authenticated Client
Next we construct a Python object that we can use to make requests.
The following snippet shows how we can authenticate using the service account on the Datalab host. For more detail about authentication from Python, see [Using OAuth 2.0 for Server to Server Applications](https://developers.google.com/api-client-library/python/auth/service-accounts).
```
from httplib2 import Http
from oauth2client.client import GoogleCredentials
credentials = GoogleCredentials.get_application_default()
http = Http()
credentials.authorize(http)
```
And then we create a client for the Genomics API.
```
from apiclient.discovery import build
genomics = build('genomics', 'v1', http=http)
```
### Send a request to the Genomics API
Now that we have a Python client for the Genomics API, we can access a variety of different resources. For details about each available resource, see the python client [API docs here](https://google-api-client-libraries.appspot.com/documentation/genomics/v1/python/latest/index.html).
Using our `genomics` client, we'll demonstrate fetching a Dataset resource by ID (the [1000 Genomes dataset](http://googlegenomics.readthedocs.org/en/latest/use_cases/discover_public_data/1000_genomes.html) in this case).
First, we need to construct a request object.
```
request = genomics.datasets().get(datasetId='10473108253681171589')
```
Next, we'll send this request to the Genomics API by calling the `request.execute()` method.
```
response = request.execute()
```
You will need to enable the Genomics API for your project if you have not done so previously. Click on [this link](https://console.developers.google.com/flows/enableapi?apiid=genomics) to enable the API in your project.
The response object returned is simply a Python dictionary. Let's take a look at the properties returned in the response.
```
for entry in response.items():
print "%s => %s" % entry
```
Success! We can see the name of the specified Dataset and a few other pieces of metadata.
Accessing other Genomics API resources will follow this same set of steps. The full [list of available resources within the API is here](https://google-api-client-libraries.appspot.com/documentation/genomics/v1/python/latest/index.html). Each resource has details about the different verbs that can be applied (e.g., [Dataset methods](https://google-api-client-libraries.appspot.com/documentation/genomics/v1/python/latest/genomics_v1.datasets.html)).
## Access Data
In this portion of the notebook, we implement [this same example](https://github.com/googlegenomics/getting-started-with-the-api/tree/master/python), which is also available as a python script. First let's define a few constants to use within the examples that follow.
```
dataset_id = '10473108253681171589' # This is the 1000 Genomes dataset ID
sample = 'NA12872'
reference_name = '22'
reference_position = 51003835
```
### Get read bases for a sample at a specific position
First find the read group set ID for the sample.
```
request = genomics.readgroupsets().search(
body={'datasetIds': [dataset_id], 'name': sample},
fields='readGroupSets(id)')
read_group_sets = request.execute().get('readGroupSets', [])
if len(read_group_sets) != 1:
raise Exception('Searching for %s didn\'t return '
'the right number of read group sets' % sample)
read_group_set_id = read_group_sets[0]['id']
```
Once we have the read group set ID, lookup the reads at the position in which we are interested.
```
request = genomics.reads().search(
body={'readGroupSetIds': [read_group_set_id],
'referenceName': reference_name,
'start': reference_position,
'end': reference_position + 1,
'pageSize': 1024},
fields='alignments(alignment,alignedSequence)')
reads = request.execute().get('alignments', [])
```
And we print out the results.
```
# Note: This is simplistic - the cigar should be considered for real code
bases = [read['alignedSequence'][
reference_position - int(read['alignment']['position']['position'])]
for read in reads]
print '%s bases on %s at %d are' % (sample, reference_name, reference_position)
from collections import Counter
for base, count in Counter(bases).items():
print '%s: %s' % (base, count)
```
### Get variants for a sample at a specific position
First find the call set ID for the sample.
```
request = genomics.callsets().search(
body={'variantSetIds': [dataset_id], 'name': sample},
fields='callSets(id)')
resp = request.execute()
call_sets = resp.get('callSets', [])
if len(call_sets) != 1:
raise Exception('Searching for %s didn\'t return '
'the right number of call sets' % sample)
call_set_id = call_sets[0]['id']
```
Once we have the call set ID, lookup the variants that overlap the position in which we are interested.
```
request = genomics.variants().search(
body={'callSetIds': [call_set_id],
'referenceName': reference_name,
'start': reference_position,
'end': reference_position + 1},
fields='variants(names,referenceBases,alternateBases,calls(genotype))')
variant = request.execute().get('variants', [])[0]
```
And we print out the results.
```
variant_name = variant['names'][0]
genotype = [variant['referenceBases'] if g == 0
else variant['alternateBases'][g - 1]
for g in variant['calls'][0]['genotype']]
print 'the called genotype is %s for %s' % (','.join(genotype), variant_name)
```
| github_jupyter |
```
import csv
from pprint import pprint
import random
import numpy as np
alphabet = ['',
'ا', 'ب', 'ت', 'ث','ج','ح', 'خ',
'د','ذ','ر','ز', 'س','ش','ص',
'ض','ط','ظ','ع','غ','ف','ق',
'ك','ل','م','ن','ه','و','ي',
'ء','ى','أ','ؤ']
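# xalphabetin maps an Arabic character to an index in [0, 512) derived from its two UTF-8 bytes;
# binin later uses these indices to one-hot encode up to 20 letters into a 20*512-wide input vector.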
def xalphabetin(char):
nums = list(char.encode('utf8'))
nums[0] = nums[0] - 216
num = (nums[0] * 256) + nums[1]
return num
def alphabetin(char):
if(char == 'ؤ'):
return 29
if(char == 'أ'):
return 29
if(char == 'ى'):
return 1
if(char == 'ئ'):
return 1
return alphabet.index(char)
def alphabetout(num):
return alphabet[num]
def binin(dcty):
x = np.zeros(20*512) # 20 letters max x (from unicode)
y = np.zeros((4*30) + 1) # 4 letters max y (mapped to alphabet) + 1 "no root" flag
lx = 0 # letter index
for letter in list(dcty['word']):
ix = (lx*512) + xalphabetin(letter)
x[ix] = 1
lx += 1
if dcty['rootsize'] > 0:
y[(0*30) + dcty['root1']] = 1
if dcty['rootsize'] > 1:
y[(1*30) + dcty['root2']] = 1
if dcty['rootsize'] > 2:
y[(2*30) + dcty['root3']] = 1
if dcty['rootsize'] > 3:
y[(3*30) + dcty['root4']] = 1
if dcty['rootsize'] == 0:
y[4*30] = 1 # no root
return np.array([x, y])
def binout(by):
root = ''
if by[120] == 1:
return ''
for yix in range(0, 30):
if by[yix] == 1:
lix = yix % 30
root += alphabetout(lix)
break
for yix in range(30, 60):
if by[yix] == 1:
lix = yix % 30
root += alphabetout(lix)
break
for yix in range(60, 90):
if by[yix] == 1:
lix = yix % 30
root += alphabetout(lix)
break
for yix in range(90, 120):
if by[yix] == 1:
lix = yix % 30
root += alphabetout(lix)
break
if len(list(root)) == 2:
root += root[1]
return root
def transformin(row):
if(len(row[1]) == 0):
# null object
dcty = {
'word': row[0],
'rootsize': 0,
'root1': 0,
'root2': 0,
'root3': 0,
'root4': 0
}
binxy = binin(dcty)
dcty['x'] = binxy[0]
dcty['y'] = binxy[1]
return dcty
rootlist = list(row[1])
rootsize = len(rootlist)
if(len(rootlist) == 2):
rootlist += [rootlist[1]]
rootsize = 3
if(rootlist[2] not in alphabet):
# null object
dcty = {
'word': row[0],
'rootsize': 0,
'root1': 0,
'root2': 0,
'root3': 0,
'root4': 0
}
binxy = binin(dcty)
dcty['x'] = binxy[0]
dcty['y'] = binxy[1]
return dcty
if(len(rootlist) == 3):
rootlist += [""]
dcty = {
'word': row[0],
'rootsize': rootsize,
'root1': alphabetin(rootlist[0]),
'root2': alphabetin(rootlist[1]),
'root3': alphabetin(rootlist[2]),
'root4': alphabetin(rootlist[3])
}
binxy = binin(dcty)
dcty['x'] = binxy[0]
dcty['y'] = binxy[1]
return dcty
def transformout(data):
return [data['word'], alphabetout(data['root1']) + alphabetout(data['root2']) + alphabetout(data['root3']) + alphabetout(data['root4'])]
datain = []
with open('roots-all.csv') as csvfile:
readcsv = csv.reader(csvfile, delimiter=',')
next(readcsv)
for row in readcsv:
data = transformin(row)
if(data == False):
continue
datain += [data]
for i in range(3):
r = random.randint(0,len(datain))
pprint(transformout(datain[r]))
pprint(datain[r])
pprint(binout(datain[r]['y']))
print("\n")
from sklearn.model_selection import train_test_split
X = datain
y = np.array([d['y'] for d in datain])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.025)
DX_train = np.array([d['x'] for d in X_train])
DX_test = np.array([d['x'] for d in X_test])
pprint(np.shape(DX_train))
pprint(np.shape(DX_test))
pprint(np.shape(y_train))
pprint(np.shape(y_test))
from keras.models import Sequential
from keras import regularizers
from keras.layers import Dense
model = Sequential()
model.add(Dense(8000,
input_dim=10240,
kernel_initializer='normal',
activation='sigmoid'))
model.add(Dense(121,
kernel_initializer='normal',
activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(DX_train, y_train, validation_data=(DX_test, y_test), epochs=7, batch_size=200)
loss_and_metrics = model.evaluate(DX_test, y_test, batch_size=128)
pprint(loss_and_metrics)
def ytobin(y):
by = np.zeros(121)
if y[120] == 1:
by[120] = 1
return by
by[np.argmax(y[0:30])] = 1
by[np.argmax(y[30:60]) + 30] = 1
if np.max(y[60:90]) > 0.02:
by[np.argmax(y[60:90]) + 60] = 1
if np.max(y[90:120]) > 0.01:
by[np.argmax(y[90:120]) + 90] = 1
return by
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
score = []
for r in range(len(X_test)):
r_pred = model.predict(DX_test[r:r+1,:])[0]
if binout(ytobin(r_pred)) == transformout(X_test[r])[1]:
print("Correct: " + str(transformout(X_test[r])))
score += [1]
else:
print("Missed: " + str(transformout(X_test[r])) + " Predicted: " + binout(ytobin(r_pred)))
score += [0]
print("Score: " + str(round(100 * (np.sum(score) / len(score)), 1)) + "%")
x1 = np.array([transformin(["أرحام",""])['x']])
r_pred = model.predict([x1])[0]
print(binout(ytobin(r_pred)))
```
| github_jupyter |
## Data Preparation for the first Model
Welcome to the first notebook. Here we'll process the data from downloading to what we will be using to train our first model - **'Wh’re Art Thee Min’ral?'**.
The steps we'll be following here are:
- Downloading the SARIG Geochem Data Package. **(~350 Mb)**
- Understanding the data columns in our csv of interest.
- Cleaning and applying some processing.
- Saving our processed file into a csv.
- _And seeing some unnecessary memes in between_.
You can upload this notebook and run it on colab or on Jupyter-Notebook locally.
```
# import the required package - Pandas
import pandas as pd
```
You can simply download the data by clicking the link [here](https://unearthed-exploresa.s3-ap-southeast-2.amazonaws.com/Unearthed_5_SARIG_Data_Package.zip). You can also download it by simply running the cell down below.
We recommend using **Google Colab** and downloading the data there if you have a poor internet connection.

Colab has a decent internet speed of around **~15-20 Mb/s** which is more than enough for the download.
```
# You can simply download the data by running this cell
!wget https://unearthed-exploresa.s3-ap-southeast-2.amazonaws.com/Unearthed_5_SARIG_Data_Package.zip
```
Here, for the extraction step, if you wish to keep the downloaded file for later use, you can first mount your Google Drive and then extract the files there. You can read more about mounting Google Drive in Colab [here](https://towardsdatascience.com/downloading-datasets-into-google-drive-via-google-colab-bcb1b30b0166).
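If you go down that route, mounting Drive in Colab is a single call (a minimal sketch; `/content/drive` is Colab's default mount point):
```
# Mount Google Drive so the downloaded/extracted files persist across sessions
from google.colab import drive
drive.mount('/content/drive')
```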
**Note** - One of the files is really big (~10 Gb), so it might take some time to extract. *Don't think that it's stuck!*
```
# Let's first create a directory to extract the downloaded zip file.
!mkdir 'GeoChemData'
# Now let's unzip the files into the data directory that we created.
!unzip 'Unearthed_5_SARIG_Data_Package.zip' -d 'GeoChemData/'
# Read the df_details.csv
# We use unicode_escape as the encoding to avoid a utf-8 decoding error.
df_details = pd.read_csv('/content/GeoChemData/SARIG_Data_Package3_Exported06072020/sarig_dh_details_exp.csv', encoding= 'unicode_escape')
# Let's view the first few columns
df_details.head()
# Data Column Information
df_details.info()
```
### What columns do we need?
We only need the following three columns from this dataframe ->
- `LONGITUDE_GDA94`: This is the longitude of the mine/mineral location in **EPSG:4283** Co-ordinate Referencing System (CRS).
- `LATITUDE_GDA94`: This is the latitude of the mine/mineral location in **EPSG:4283** Co-ordinate Referencing System (CRS).
- `MINERAL_CLASS`: Mineral Class is a column containing **two unique values (Y/N)** representing if there is any mineralization or not.
> *Note - We are using GDA94 over GDA2020 because the former is more widely established.* You can read more about it on our glossary page [here]().
```
# Here the only relevant data we need is the location and the Mineral Class (Yes/No)
df_final = df_details[['LONGITUDE_GDA94','LATITUDE_GDA94', 'MINERAL_CLASS']]
# Drop the rows with null values
df_final = df_final.dropna()
# Let's print out a few rows of the new dataframe.
df_final.head()
# Let's check the data points in both classes
print("Number of rows with Mineral Class Yes is", len(df_final.query('MINERAL_CLASS=="Y"')))
print("Number of rows with Mineral Class No is", len(df_final.query('MINERAL_CLASS=="N"')))
```
The total number of rows in the new dataset is **147407 (Y) + 174436 (N) = 321843**, which is quite sufficient for training our models.
Also, the ratio of Class `'Y'` to Class `'N'` is roughly 0.85 : 1, which is quite _**balanced**_.
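As a quick sanity check, the same balance can be read off with `value_counts` (a small sketch; the exact proportions depend on the version of the data package you downloaded):
```
# Class balance as proportions
df_final['MINERAL_CLASS'].value_counts(normalize=True)
```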

Now that we have our csv, let's go ahead and save our progress into a new csv before the session expires!

```
# Create a new directory to save the csv.
!mkdir 'GeoChemData/exported'
# Convert the dataframe into a new csv file.
df_final.to_csv('GeoChemData/exported/mod1_unsampled.csv')
# Finally, if you are on Google Colab, you can simply download the file using ->
from google.colab import files
files.download('GeoChemData/exported/mod1_unsampled.csv')
```
| github_jupyter |
# Black-Scholes Algorithm Using Numba-dppy
## Sections
- [Black Sholes algorithm](#Black-Sholes-algorithm)
- _Code:_ [Implementation of Black Scholes targeting CPU using Numba JIT](#Implementation-of-Black-Scholes-targeting-CPU-using-Numba-JIT)
- _Code:_ [Implementation of Black Scholes targeting GPU using Numba JIT](#Implementation-of-Black-Scholes-targeting-GPU-using-Numba-JIT)
- _Code:_ [Implementation of Black Scholes targeting GPU using Kernels](#Implementation-of-Black-Scholes-targeting-GPU-using-Kernels)
- _Code:_ [Implementation of Black Scholes targeting GPU using Numpy](#Implementation-of-Black-Scholes-targeting-GPU-using-Numpy)
## Learning Objectives
* Build a Numba implementation of Black Scholes targeting CPU and GPU using Numba Jit
* Build a Numba-DPPY implementation of Black Scholes on CPU and GPU using Kernel approach
* Build a Numba-DPPY implementation of Black Scholes on GPU using Numpy approach
## numba-dppy
Numba-dppy is a standalone extension to the Numba JIT compiler that adds SYCL programming capabilities to Numba. Numba-dppy is packaged as part of the IDP that comes with oneAPI base toolkit, and you don’t need to install any specific Conda packages. The support for SYCL is via DPC++'s SYCL runtime and other SYCL compilers are not supported by Numba-dppy.
## Black Sholes algorithm
The Black-Scholes program computes the price of a portfolio of options using partial differential equations. The entire computation performed by Black-Scholes is data-parallel, where each option can be priced independent of other options.
The Black-Scholes model is one of the most important concepts in modern quantitative finance theory. Developed in 1973 by Fischer Black, Myron Scholes, and Robert Merton, it is still widely used today and regarded as one of the best ways to determine fair prices of financial derivatives.
### Implementation of Black-Scholes Formula
The Black-Scholes formula is used widely in almost every aspect of quantitative finance. The Black-Scholes calculation has essentially permeated every quantitative finance library by traders and quantitative analysts alike.
Let’s look at a hypothetical situation in which a firm has to calculate European options for millions of financial instruments. For each instrument, it has the current price, strike price, and option expiration time. For each set of these data, it makes several thousand Black-Scholes calculations, much like the way options for neighboring stock prices, strike prices, and different option expiration times would be calculated.
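For reference, the closed-form prices that the implementations below compute (with the cumulative normal $\Phi$ expressed through `erf`, exactly as the code does) are:
$$
C = P\,\Phi(d_1) - S e^{-rT}\,\Phi(d_2), \qquad \text{Put} = C - P + S e^{-rT},
$$
$$
d_1 = \frac{\ln(P/S) + (r + \sigma^2/2)T}{\sigma\sqrt{T}}, \quad d_2 = d_1 - \sigma\sqrt{T}, \quad \Phi(x) = \tfrac{1}{2}\left(1 + \operatorname{erf}\left(\tfrac{x}{\sqrt{2}}\right)\right),
$$
where $P$ is the current price, $S$ the strike price, $T$ the time to expiration, $r$ the interest rate, and $\sigma$ the volatility.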
# Implementation of Black Scholes targeting CPU using Numba JIT
In the following example, we introduce a naive Black-Scholes implementation that targets a CPU using the Numba JIT, where we calculate the Black-Scholes formula as described above.
This is the decorator-based approach, where we offload data parallel code sections like parallel-for, and certain NumPy function calls. With the decorator method, a programmer needs to simply identify the most time-consuming parts of the program. If those parts can be parallelized, the programmer needs to just annotate those sections using Numba-DPPy, and can expect those code sections to execute on a GPU.
1. Inspect the code cell below and click run ▶ to save the code to a file.
2. Next run ▶ the cell in the __Build and Run__ section below the code to compile and execute the code.
```
%%writefile lab/black_sholes_jit_cpu.py
# Copyright (C) 2017-2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import dpctl
import base_bs_erf
import numba as nb
from math import log, sqrt, exp, erf
# blackscholes implemented as a parallel loop using numba.prange
@nb.njit(parallel=True, fastmath=True)
def black_scholes_kernel(nopt, price, strike, t, rate, vol, call, put):
mr = -rate
sig_sig_two = vol * vol * 2
for i in nb.prange(nopt):
P = price[i]
S = strike[i]
T = t[i]
a = log(P / S)
b = T * mr
z = T * sig_sig_two
c = 0.25 * z
y = 1.0 / sqrt(z)
w1 = (a - b + c) * y
w2 = (a - b - c) * y
d1 = 0.5 + 0.5 * erf(w1)
d2 = 0.5 + 0.5 * erf(w2)
Se = exp(b) * S
r = P * d1 - Se * d2
call[i] = r
put[i] = r - P + Se
def black_scholes(nopt, price, strike, t, rate, vol, call, put):
# offload blackscholes computation to CPU (toggle level0 or opencl driver).
with dpctl.device_context(base_bs_erf.get_device_selector()):
black_scholes_kernel(nopt, price, strike, t, rate, vol, call, put)
# call the run function to setup input data and performance data infrastructure
base_bs_erf.run("Numba@jit-loop-par", black_scholes)
```
### Build and Run
Select the cell below and click run ▶ to compile and execute the code:
```
! chmod 755 q; chmod 755 run_black_sholes_jit_cpu.sh; if [ -x "$(command -v qsub)" ]; then ./q run_black_sholes_jit_cpu.sh; else ./run_black_sholes_jit_cpu.sh; fi
```
# Implementation of Black Scholes targeting GPU using Numba JIT
In the example below, we introduce a naive Black-Scholes implementation that targets a GPU using the Numba JIT, where we calculate the Black-Scholes formula as described above.
1. Inspect the code cell below and click run ▶ to save the code to a file.
2. Next run ▶ the cell in the __Build and Run__ section below the code to compile and execute the code.
```
%%writefile lab/black_sholes_jit_gpu.py
# Copyright (C) 2017-2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import dpctl
import base_bs_erf_gpu
import numba as nb
from math import log, sqrt, exp, erf
# blackscholes implemented as a parallel loop using numba.prange
@nb.njit(parallel=True, fastmath=True)
def black_scholes_kernel(nopt, price, strike, t, rate, vol, call, put):
mr = -rate
sig_sig_two = vol * vol * 2
for i in nb.prange(nopt):
P = price[i]
S = strike[i]
T = t[i]
a = log(P / S)
b = T * mr
z = T * sig_sig_two
c = 0.25 * z
y = 1.0 / sqrt(z)
w1 = (a - b + c) * y
w2 = (a - b - c) * y
d1 = 0.5 + 0.5 * erf(w1)
d2 = 0.5 + 0.5 * erf(w2)
Se = exp(b) * S
r = P * d1 - Se * d2
call[i] = r
put[i] = r - P + Se
def black_scholes(nopt, price, strike, t, rate, vol, call, put):
# offload blackscholes computation to GPU (toggle level0 or opencl driver).
with dpctl.device_context(base_bs_erf_gpu.get_device_selector()):
black_scholes_kernel(nopt, price, strike, t, rate, vol, call, put)
# call the run function to setup input data and performance data infrastructure
base_bs_erf_gpu.run("Numba@jit-loop-par", black_scholes)
```
### Build and Run
Select the cell below and click run ▶ to compile and execute the code:
```
! chmod 755 q; chmod 755 run_black_sholes_jit_gpu.sh; if [ -x "$(command -v qsub)" ]; then ./q run_black_sholes_jit_gpu.sh; else ./run_black_sholes_jit_gpu.sh; fi
```
# Implementation of Black Scholes targeting GPU using Kernels
## Writing Explicit Kernels in numba-dppy
Writing a SYCL kernel using the `@numba_dppy.kernel` decorator has similar syntax to writing OpenCL kernels. As such, the numba-dppy module provides similar indexing and other functions as OpenCL. The indexing functions supported inside a `numba_dppy.kernel` are:
* numba_dppy.get_local_id : Gets the local ID of the item
* numba_dppy.get_local_size: Gets the local work group size of the device
* numba_dppy.get_group_id : Gets the group ID of the item
* numba_dppy.get_num_groups: Gets the number of work-groups
Refer https://intelpython.github.io/numba-dppy/latest/user_guides/kernel_programming_guide/index.html for more details.
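As a minimal sketch of the kernel syntax (using only constructs that also appear in the full example below, such as `numba_dppy.get_global_id` and the `kernel[global_size, numba_dppy.DEFAULT_LOCAL_SIZE]` launch form; the `"opencl:gpu"` selector string is an assumption and may differ on your system), an element-wise add looks roughly like this:
```
import dpctl
import numba_dppy
import numpy as np

@numba_dppy.kernel
def vector_add(a, b, c):
    # each work-item computes one element of the result
    i = numba_dppy.get_global_id(0)
    c[i] = a[i] + b[i]

a = np.ones(1024, dtype=np.float32)
b = np.ones(1024, dtype=np.float32)
c = np.empty_like(a)
with dpctl.device_context("opencl:gpu"):  # assumed device selector; adjust to your hardware
    vector_add[a.size, numba_dppy.DEFAULT_LOCAL_SIZE](a, b, c)
```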
In the following example, we use the dppy-kernel approach for explicit kernel programming. If the programmer wants to extract further performance from the offloaded code, this approach makes it possible to tune the GPU parameters and take advantage of the work-groups and work-items of the device.
1. Inspect the code cell below and click run ▶ to save the code to a file.
2. Next run ▶ the cell in the __Build and Run__ section below the code to compile and execute the code.
```
%%writefile lab/black_sholes_kernel.py
# Copyright (C) 2017-2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import dpctl
import base_bs_erf_gpu
import numba_dppy
from math import log, sqrt, exp, erf
# blackscholes implemented using dppy.kernel
@numba_dppy.kernel(
access_types={"read_only": ["price", "strike", "t"], "write_only": ["call", "put"]}
)
def black_scholes(nopt, price, strike, t, rate, vol, call, put):
mr = -rate
sig_sig_two = vol * vol * 2
i = numba_dppy.get_global_id(0)
P = price[i]
S = strike[i]
T = t[i]
a = log(P / S)
b = T * mr
z = T * sig_sig_two
c = 0.25 * z
y = 1.0 / sqrt(z)
w1 = (a - b + c) * y
w2 = (a - b - c) * y
d1 = 0.5 + 0.5 * erf(w1)
d2 = 0.5 + 0.5 * erf(w2)
Se = exp(b) * S
r = P * d1 - Se * d2
call[i] = r
put[i] = r - P + Se
def black_scholes_driver(nopt, price, strike, t, rate, vol, call, put):
# offload blackscholes computation to GPU (toggle level0 or opencl driver).
with dpctl.device_context(base_bs_erf_gpu.get_device_selector()):
black_scholes[nopt, numba_dppy.DEFAULT_LOCAL_SIZE](
nopt, price, strike, t, rate, vol, call, put
)
# call the run function to setup input data and performance data infrastructure
base_bs_erf_gpu.run("Numba@jit-loop-par", black_scholes_driver)
```
### Build and Run
Select the cell below and click run ▶ to compile and execute the code:
```
! chmod 755 q; chmod 755 run_black_sholes_kernel.sh; if [ -x "$(command -v qsub)" ]; then ./q run_black_sholes_kernel.sh; else ./run_black_sholes_kernel.sh; fi
```
# Implementation of Black Scholes targeting GPU using Numpy
In the following example, we look at the Black-Scholes NumPy implementation and target the GPU using the NumPy approach.
1. Inspect the code cell below and click run ▶ to save the code to a file.
2. Next run ▶ the cell in the __Build and Run__ section below the code to compile and execute the code.
```
%%writefile lab/black_sholes_numpy_graph.py
# Copyright (C) 2017-2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import dpctl
import base_bs_erf_graph
import numba as nb
import numpy as np
from numpy import log, exp, sqrt
from math import erf
# Numba does not know the erf function from numpy or scipy, so wrap math.erf with nb.vectorize
@nb.vectorize(nopython=True)
def nberf(x):
return erf(x)
# blackscholes implemented using numpy function calls
@nb.jit(nopython=True, parallel=True, fastmath=True)
def black_scholes_kernel(nopt, price, strike, t, rate, vol, call, put):
mr = -rate
sig_sig_two = vol * vol * 2
P = price
S = strike
T = t
a = log(P / S)
b = T * mr
z = T * sig_sig_two
c = 0.25 * z
y = 1.0 / sqrt(z)
w1 = (a - b + c) * y
w2 = (a - b - c) * y
d1 = 0.5 + 0.5 * nberf(w1)
d2 = 0.5 + 0.5 * nberf(w2)
Se = exp(b) * S
r = P * d1 - Se * d2
call[:] = r # temporary `r` is necessary for faster `put` computation
put[:] = r - P + Se
def black_scholes(nopt, price, strike, t, rate, vol, call, put):
# offload blackscholes computation to GPU (toggle level0 or opencl driver).
with dpctl.device_context(base_bs_erf_graph.get_device_selector()):
black_scholes_kernel(nopt, price, strike, t, rate, vol, call, put)
# call the run function to setup input data and performance data infrastructure
base_bs_erf_graph.run("Numba@jit-numpy", black_scholes)
```
### Build and Run
Select the cell below and click run ▶ to compile and execute the code:
```
! chmod 755 q; chmod 755 run_black_sholes_numpy_graph.sh; if [ -x "$(command -v qsub)" ]; then ./q run_black_sholes_numpy_graph.sh; else ./run_black_sholes_numpy_graph.sh; fi
```
# Plot GPU Results
The code below computes calls and puts versus the current price, for strike prices in the range 23.0 to 23.5, and plots the results in a graph as shown below.
### View the results
Select the cell below and click run ▶ to view the graph:
```
from matplotlib import pyplot as plt
import numpy as np
def read_dictionary(fn):
import pickle
# Load data (deserialize)
with open(fn, 'rb') as handle:
dictionary = pickle.load(handle)
return dictionary
resultsDict = read_dictionary('resultsDict.pkl')
limit = 10
call = resultsDict['call']
put = resultsDict['put']
price = resultsDict['price']
strike = resultsDict['strike']
plt.style.use('dark_background')
priceRange = [23.0, 23.5]
# strikeIndex = np.where((price >= priceRange[0]) & (price < priceRange[1]) )[0]
# plt.scatter(strike[strikeIndex], put[strikeIndex], c= 'r', s = 2, alpha = 1, label = 'puts')
# plt.scatter(strike[strikeIndex], call[strikeIndex], c= 'b', s = 2, alpha = 1, label = 'calls')
# plt.title('Calls and Puts verses Strike for a current price in range {}'.format(priceRange))
# plt.ylabel('Option Price [$]')
# plt.xlabel('Strike Price [$]')
# plt.legend()
# plt.grid()
strikeRange = [23.0, 23.5]
strikeIndex = np.where((strike >= strikeRange[0]) & (strike < strikeRange[1]) )[0]
plt.scatter(price[strikeIndex], put[strikeIndex], c= 'r', s = 2, alpha = 1, label = 'puts')
plt.scatter(price[strikeIndex], call[strikeIndex], c= 'b', s = 2, alpha = 1, label = 'calls')
plt.title('Calls and Puts versus Current price for a strike price in range {}'.format(strikeRange))
plt.ylabel('Option Price [$]')
plt.xlabel('Current Price [$]')
plt.legend()
plt.grid()
```
_If the Jupyter cells are not responsive, or if they error out when you compile the code samples, please restart the Jupyter kernel ("Kernel -> Restart Kernel and Clear All Outputs") and compile the code samples again._
## Summary
In this module you will have learned the following:
* Numba implementation of Black Scholes targeting a CPU and GPU using Numba JIT
* Numba-DPPY implementation of Black Scholes on a CPU and GPU using the kernel approach
* Numba-DPPY implementation of Black Scholes on a GPU using Numpy approach
| github_jupyter |
```
# Pandas for managing datasets
import pandas as pd
# seaborn for plotting and styling
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
# read dataset
tips = sns.load_dataset("tips")
# a preview of the data
tips.head()
# make a copy of the data to create the graphs of
df = tips.copy()
df
# create a column to determine tip percentage
df["tip_percentage"] = df["tip"] / df["total_bill"]
# This plot is a histogram of tip percentages
# The hue argument allows the color to be changed to reflect the categories
sns.histplot(x='tip_percentage', binwidth = 0.05, hue = 'sex', data = df)
# Scatterplot of total bill and tip
# This shows how you can set the style to change the visual style
# The default relplot is a scatterplot
sns.set(style = 'darkgrid')
sns.relplot( x = 'total_bill', y = 'tip', hue = 'smoker', data = df)
# Scatterplot Gender
# This scatterplot is the same with the addition of the size argument
# The size argument is time here
sns.set(style = 'darkgrid')
gender = sns.relplot( x = 'total_bill', y = 'tip', hue = 'sex', size = 'time', data = df)
# Catplot is for categorical data
# The default catplot is a strip plot
sns.catplot(x = 'day', y = 'total_bill', data = df)
# This catplot shows that with the addition of the kind argument,
# we can alter it to another cat plot, in this case, a barplot
sns.catplot(x = 'time', y = 'total_bill', data= df, kind='bar')
# A violin plot is another way of visualizing categorical data
sns.violinplot(x = 'day', y = 'total_bill', hue = 'sex', data = df)
# This violin plot shows the same data as above
# With different arguments, different visuals are created
# Here we set bw to 0.25 and split to True
sns.violinplot(x = 'day', y = 'total_bill', hue = 'sex', bw = .25, split = True, data = df)
# This shows how we can alter the color palette of a violin plot
sns.violinplot(x = 'day', y = 'total_bill', hue = 'sex', bw = .25, split = True, palette = 'Greens', data = df)
# Pairplots allow visualization of many distributions at once
# Seaborn determines the visualizations and the variables to create
# This allows the user to quickly view distributions very easily
sns.set_theme(style="ticks")
sns.pairplot(df, hue='sex')
# This swarm plot is similar to a strip plot but does not allow points to overlap
# The style is whitegrid
sns.set_style('whitegrid')
sns.swarmplot(y='total_bill', x = 'day', data = df)
# Seaborn can also create heatmaps
# This heatmap shows correlation between variables
sns.heatmap(df.corr(), annot = True, cmap = 'viridis')
# This heatmap requires creation of a pivot table
# This shows that Seaborn can work with pivot tables
pivot = df.pivot_table(index = ['day'], columns =['size'], values = 'tip_percentage', aggfunc = np.average)
sns.heatmap(pivot)
# This plot shows Seaborn's ability to create side by side visuals
# The col argument allows for this
pal = dict(Male='#6495ED', Female = '#F08080')
g = sns.lmplot(x='total_bill', y = 'tip_percentage', col = 'sex', hue='sex', data =df,
palette=pal, y_jitter=.02, logistic = True, truncate = True)
# This plot is an example of how you can overlay visualizations
# This is a boxplot with a stripplot on top
sns.stripplot(x='tip', y = 'day', data = df, jitter = True, dodge = True, linewidth=1,
edgecolor = 'gray', palette = 'gray')
colors = ['#78C850', '#F08030', '#6890F0','#F8D030']
sns.boxplot(x='tip', y='day',data = df, fliersize=0, palette = colors)
```
| github_jupyter |
TSG097 - Get BDC stateful sets (Kubernetes)
===========================================
Description
-----------
Steps
-----
### Common functions
Define helper functions used in this notebook.
```
# Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows
import sys
import os
import re
import json
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
retry_hints = {} # Output in stderr known to be transient, therefore automatically retry
error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {} # The SOP to help install the executable if it cannot be found
first_run = True
rules = None
debug_logging = False
def run(cmd, return_output=False, no_output=False, retry_count=0):
"""Run shell command, stream stdout, print stderr and optionally return output
NOTES:
1. Commands that need this kind of ' quoting on Windows e.g.:
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}
Need to actually pass in as '"':
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}
The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:
`iter(p.stdout.readline, b'')`
The shlex.split call does the right thing for each platform, just use the '"' pattern for a '
"""
MAX_RETRIES = 5
output = ""
retry = False
global first_run
global rules
if first_run:
first_run = False
rules = load_rules()
# When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
#
# ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
#
if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
cmd = cmd.replace("\n", " ")
# shlex.split is required on bash and for Windows paths with spaces
#
cmd_actual = shlex.split(cmd)
# Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
#
user_provided_exe_name = cmd_actual[0].lower()
# When running python, use the python in the ADS sandbox ({sys.executable})
#
if cmd.startswith("python "):
cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)
# On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
# with:
#
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
#
# Setting it to a default value of "en_US.UTF-8" enables pip install to complete
#
if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
os.environ["LC_ALL"] = "en_US.UTF-8"
# When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
#
if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")
# To aid supportability, determine which binary file will actually be executed on the machine
#
which_binary = None
# Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
# get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
# of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
# always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
# look for the 2nd installation of CURL in the path)
if platform.system() == "Windows" and cmd.startswith("curl "):
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, "curl.exe")
if os.path.exists(p) and os.access(p, os.X_OK):
if p.lower().find("system32") == -1:
cmd_actual[0] = p
which_binary = p
break
# Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
# seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
#
# NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
#
if which_binary == None:
which_binary = shutil.which(cmd_actual[0])
if which_binary == None:
if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
else:
cmd_actual[0] = which_binary
start_time = datetime.datetime.now().replace(microsecond=0)
print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
print(f" cwd: {os.getcwd()}")
# Command-line tools such as CURL and AZDATA HDFS commands output
# scrolling progress bars, which causes Jupyter to hang forever, to
# workaround this, use no_output=True
#
# Work around an infinite hang when a notebook generates a non-zero return code, break out, and do not wait
#
wait = True
try:
if no_output:
p = Popen(cmd_actual)
else:
p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
with p.stdout:
for line in iter(p.stdout.readline, b''):
line = line.decode()
if return_output:
output = output + line
else:
if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
regex = re.compile(' "(.*)"\: "(.*)"')
match = regex.match(line)
if match:
if match.group(1).find("HTML") != -1:
display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
else:
display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))
wait = False
break # otherwise infinite hang, have not worked out why yet.
else:
print(line, end='')
if rules is not None:
apply_expert_rules(line)
if wait:
p.wait()
except FileNotFoundError as e:
if install_hint is not None:
display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e
exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()
if not no_output:
for line in iter(p.stderr.readline, b''):
try:
line_decoded = line.decode()
except UnicodeDecodeError:
# NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
#
# \xa0
#
# For example see this in the response from `az group create`:
#
# ERROR: Get Token request returned http error: 400 and server
# response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
# The refresh token has expired due to inactivity.\xa0The token was
# issued on 2018-10-25T23:35:11.9832872Z
#
# which generates the exception:
#
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
#
print("WARNING: Unable to decode stderr line, printing raw bytes:")
print(line)
line_decoded = ""
pass
else:
# azdata emits a single empty line to stderr when doing an hdfs cp, don't
# print this empty "ERR:" as it confuses.
#
if line_decoded == "":
continue
print(f"STDERR: {line_decoded}", end='')
if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
exit_code_workaround = 1
# inject HINTs to next TSG/SOP based on output in stderr
#
if user_provided_exe_name in error_hints:
for error_hint in error_hints[user_provided_exe_name]:
if line_decoded.find(error_hint[0]) != -1:
display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))
# apply expert rules (to run follow-on notebooks), based on output
#
if rules is not None:
apply_expert_rules(line_decoded)
# Verify if a transient error, if so automatically retry (recursive)
#
if user_provided_exe_name in retry_hints:
for retry_hint in retry_hints[user_provided_exe_name]:
if line_decoded.find(retry_hint) != -1:
if retry_count < MAX_RETRIES:
print(f"RETRY: {retry_count} (due to: {retry_hint})")
retry_count = retry_count + 1
output = run(cmd, return_output=return_output, retry_count=retry_count)
if return_output:
return output
else:
return
elapsed = datetime.datetime.now().replace(microsecond=0) - start_time
# WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
# don't wait here, if success known above
#
if wait:
if p.returncode != 0:
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
else:
if exit_code_workaround !=0 :
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')
print(f'\nSUCCESS: {elapsed}s elapsed.\n')
if return_output:
return output
def load_json(filename):
"""Load a json file from disk and return the contents"""
with open(filename, encoding="utf8") as json_file:
return json.load(json_file)
def load_rules():
"""Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable"""
# Load this notebook as json to get access to the expert rules in the notebook metadata.
#
try:
j = load_json("tsg097-get-statefulsets.ipynb")
except:
pass # If the user has renamed the notebook, we can't load ourselves. NOTE: Is there a way in Jupyter to know your own filename?
else:
if "metadata" in j and \
"azdata" in j["metadata"] and \
"expert" in j["metadata"]["azdata"] and \
"expanded_rules" in j["metadata"]["azdata"]["expert"]:
rules = j["metadata"]["azdata"]["expert"]["expanded_rules"]
rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first.
# print (f"EXPERT: There are {len(rules)} rules to evaluate.")
return rules
def apply_expert_rules(line):
"""Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so
inject a 'HINT' to the follow-on SOP/TSG to run"""
global rules
for rule in rules:
notebook = rule[1]
cell_type = rule[2]
output_type = rule[3] # i.e. stream or error
output_type_name = rule[4] # i.e. ename or name
output_type_value = rule[5] # i.e. SystemExit or stdout
details_name = rule[6] # i.e. evalue or text
expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it!
if debug_logging:
print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.")
if re.match(expression, line, re.DOTALL):
if debug_logging:
print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook))
match_found = True
display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))
print('Common functions defined successfully.')
# Hints for binary (transient fault) retry, (known) error and install guide
#
retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond']}
error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']]}
install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb']}
```
### Get the Kubernetes namespace for the big data cluster
Get the namespace of the Big Data Cluster using the kubectl command line interface.
**NOTE:**
If there is more than one Big Data Cluster in the target Kubernetes
cluster, then either:
- set \[0\] to the correct value for the big data cluster.
- set the environment variable AZDATA\_NAMESPACE, before starting
Azure Data Studio.
```
# Place Kubernetes namespace name for BDC into 'namespace' variable
if "AZDATA_NAMESPACE" in os.environ:
namespace = os.environ["AZDATA_NAMESPACE"]
else:
try:
namespace = run(f'kubectl get namespace --selector=MSSQL_CLUSTER -o jsonpath={{.items[0].metadata.name}}', return_output=True)
except:
from IPython.display import Markdown
print(f"ERROR: Unable to find a Kubernetes namespace with label 'MSSQL_CLUSTER'. SQL Server Big Data Cluster Kubernetes namespaces contain the label 'MSSQL_CLUSTER'.")
display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
raise
print(f'The SQL Server Big Data Cluster Kubernetes namespace is: {namespace}')
```
### Run kubectl to display the Stateful sets
```
run(f"kubectl get statefulset -n {namespace} -o wide")
print('Notebook execution complete.')
```
| github_jupyter |
## Trajectory equations:
```
%matplotlib inline
import matplotlib.pyplot as plt
from sympy import *
init_printing()
Bx, By, Bz, B = symbols("B_x, B_y, B_z, B")
x, y, z = symbols("x, y, z" )
x_0, y_0, z_0 = symbols("x_0, y_0, z_0")
vx, vy, vz, v = symbols("v_x, v_y, v_z, v")
vx_0, vy_0, vz_0 = symbols("v_x0, v_y0, v_z0")
t = symbols("t")
q, m = symbols("q, m")
c, eps0 = symbols("c, epsilon_0")
```
The equation of motion:
$$
\begin{gather*}
m \frac{d^2 \vec{r} }{dt^2} = \frac{q}{c} [ \vec{v} \times \vec{B} ]
\end{gather*}
$$
For the case of a uniform magnetic field along the $z$-axis:
$$ \vec{B} = B_z = B, \quad B_x = 0, \quad B_y = 0 $$
In Cartesian coordinates:
```
eq_x = Eq( Derivative(x(t), t, 2), q / c / m * Bz * Derivative(y(t),t) )
eq_y = Eq( Derivative(y(t), t, 2), - q / c / m * Bz * Derivative(x(t),t) )
eq_z = Eq( Derivative(z(t), t, 2), 0 )
display( eq_x, eq_y, eq_z )
```
Motion is uniform along the $z$-axis:
```
z_eq = dsolve( eq_z, z(t) )
vz_eq = Eq( z_eq.lhs.diff(t), z_eq.rhs.diff(t) )
display( z_eq, vz_eq )
```
The constants of integration can be found from the initial conditions $z(0) = z_0$ and $v_z(0) = v_{z0}$:
```
c1_c2_system = []
initial_cond_subs = [(t, 0), (z(0), z_0), (diff(z(t),t).subs(t,0), vz_0) ]
c1_c2_system.append( z_eq.subs( initial_cond_subs ) )
c1_c2_system.append( vz_eq.subs( initial_cond_subs ) )
c1, c2 = symbols("C1, C2")
c1_c2 = solve( c1_c2_system, [c1, c2] )
c1_c2
```
So that
```
z_sol = z_eq.subs( c1_c2 )
vz_sol = vz_eq.subs( c1_c2 ).subs( [( diff(z(t),t), vz(t) ) ] )
display( z_sol, vz_sol )
```
For some reason I have not been able to solve the system of differential equations for $x$ and $y$ directly
with Sympy's `dsolve` function:
```
#dsolve( [eq_x, eq_y], [x(t),y(t)] )
```
It is necessary to resort to the manual solution. The method is to differentiate one of them over
time and substitute the other. This will result in oscillator-type second-order equations for $v_y$ and $v_x$. Their solution is known. Integrating one more time, it is possible to obtain laws of motion $x(t)$ and $y(t)$.
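Concretely, writing $v_x = \dot{x}$, $v_y = \dot{y}$ and $\Omega = \frac{q B}{m c}$, the first-order system and the resulting oscillator equation are:
$$
\dot{v}_x = \Omega v_y, \qquad \dot{v}_y = -\Omega v_x
\quad \Rightarrow \quad
\ddot{v}_x = \Omega \dot{v}_y = -\Omega^2 v_x .
$$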
```
v_subs = [ (Derivative(x(t),t), vx(t)), (Derivative(y(t),t), vy(t)) ]
eq_vx = eq_x.subs( v_subs )
eq_vy = eq_y.subs( v_subs )
display( eq_vx, eq_vy )
eq_d2t_vx = Eq( diff(eq_vx.lhs,t), diff(eq_vx.rhs,t))
eq_d2t_vx = eq_d2t_vx.subs( [(eq_vy.lhs, eq_vy.rhs)] )
display( eq_d2t_vx )
```
The solution of the last equation is
```
C1, C2, Omega = symbols( "C1, C2, Omega" )
vx_eq = Eq( vx(t), C1 * cos( Omega * t ) + C2 * sin( Omega * t ))
display( vx_eq )
omega_eq = Eq( Omega, Bz * q / c / m )
display( omega_eq )
```
where $\Omega$ is the cyclotron frequency.
```
display( vx_eq )
vy_eq = Eq( vy(t), solve( Eq( diff(vx_eq.rhs,t), eq_vx.rhs ), ( vy(t) ) )[0] )
vy_eq = vy_eq.subs( [(Omega*c*m / Bz / q, omega_eq.rhs * c * m / Bz / q)]).simplify()
display( vy_eq )
```
For initial conditions $v_x(0) = v_{x0}, v_y(0) = v_{y0}$:
```
initial_cond_subs = [(t,0), (vx(0), vx_0), (vy(0), vy_0) ]
vx0_eq = vx_eq.subs( initial_cond_subs )
vy0_eq = vy_eq.subs( initial_cond_subs )
display( vx0_eq, vy0_eq )
c1_c2 = solve( [vx0_eq, vy0_eq] )
c1_c2_subs = [ ("C1", c1_c2[c1]), ("C2", c1_c2[c2]) ]
vx_eq = vx_eq.subs( c1_c2_subs )
vy_eq = vy_eq.subs( c1_c2_subs )
display( vx_eq, vy_eq )
```
These equations can be integrated to obtain the laws of motion:
```
x_eq = vx_eq.subs( vx(t), diff(x(t),t))
x_eq = dsolve( x_eq )
y_eq = vy_eq.subs( vy(t), diff(y(t),t))
y_eq = dsolve( y_eq ).subs( C1, C2 )
display( x_eq, y_eq )
```
For nonzero $\Omega$:
```
x_eq = x_eq.subs( [(Omega, 123)] ).subs( [(123, Omega)] ).subs( [(Rational(1,123), 1/Omega)] )
y_eq = y_eq.subs( [(Omega, 123)] ).subs( [(123, Omega)] ).subs( [(Rational(1,123), 1/Omega)] )
display( x_eq, y_eq )
```
For initial conditions $x(0) = x_0, y(0) = y_0$:
```
initial_cond_subs = [(t,0), (x(0), x_0), (y(0), y_0) ]
x0_eq = x_eq.subs( initial_cond_subs )
y0_eq = y_eq.subs( initial_cond_subs )
display( x0_eq, y0_eq )
c1_c2 = solve( [x0_eq, y0_eq] )
c1_c2_subs = [ ("C1", c1_c2[0][c1]), ("C2", c1_c2[0][c2]) ]
x_eq = x_eq.subs( c1_c2_subs )
y_eq = y_eq.subs( c1_c2_subs )
display( x_eq, y_eq )
x_eq = x_eq.simplify()
y_eq = y_eq.simplify()
x_eq = x_eq.expand().collect(Omega)
y_eq = y_eq.expand().collect(Omega)
display( x_eq, y_eq )
```
Finally
```
display( x_eq, y_eq, z_sol )
display( vx_eq, vy_eq, vz_sol )
display( omega_eq )
```
| github_jupyter |
# Manpower Planning
Level: Advanced
## Objective and Prerequisites
This model is an example of a staffing problem. In manpower planning problems, choices must be made regarding recruitment, training, layoffs (redundancy), and scheduling of working hours. Staffing problems are widespread in both manufacturing and service industries.
### What You Will Learn
In this example, we will model and solve a manpower planning problem. We have three types of workers with different skills levels. For each year in the planning horizon, the forecasted number of required workers with specific skills is given. It is possible to recruit new people, train workers to improve their skills, or shift them to a part-time working arrangement. The aim is to create an optimal multi-period operation plan that achieves one of the following two objectives: minimizing the total number of layoffs over the whole horizon or minimizing total costs.
More information on this type of model can be found in example #5 of the fifth edition of Model Building in Mathematical Programming, by H. Paul Williams on pages 256-257 and 303-304.
This modeling example is at the advanced level, where we assume that you know Python and the Gurobi Python API and that you have advanced knowledge of building mathematical optimization models. Typically, the objective function and/or constraints of these examples are complex or require advanced features of the Gurobi Python API.
**Note:** You can download the repository containing this and other examples by clicking [here](https://github.com/Gurobi/modeling-examples/archive/master.zip). In order to run this Jupyter Notebook properly, you must have a Gurobi license. If you do not have one, you can request an [evaluation license](https://www.gurobi.com/downloads/request-an-evaluation-license/?utm_source=Github&utm_medium=website_JupyterME&utm_campaign=CommercialDataScience) as a *commercial user*, or download a [free license](https://www.gurobi.com/academia/academic-program-and-licenses/?utm_source=Github&utm_medium=website_JupyterME&utm_campaign=AcademicDataScience) as an *academic user*.
---
## Problem Description
A company is changing how it runs its business, and therefore its staffing needs are expected to change.
Through the purchase of new machinery, it is expected that there will be less need for unskilled labor and more need for skilled and semi-skilled labor. In addition, a lower sales forecast — driven by an economic slowdown that is predicted to happen in the next year — is expected to further reduce labor needs across all categories.
The forecast for labor needs over the next three years is as follows:
| <i></i> | Unskilled | Semi-skilled | Skilled |
| --- | --- | --- | --- |
| Current Strength | 2000 | 1500 | 1000 |
| Year 1 | 1000 | 1400 | 1000 |
| Year 2 | 500 | 2000 | 1500 |
| Year 3 | 0 | 2500 | 2000 |
The company needs to determine the following for each of the next three years:
- Recruitment
- Retraining
- Layoffs (redundancy)
- Part-time vs. full-time employees
It is important to note that labor is subject to a certain level of natural attrition each year. The rate of attrition is relatively high in the first year after a new employee is hired and relatively low in subsequent years. The expected attrition rates are as follows:
| <i></i> | Unskilled (%)| Semi-skilled (%) | Skilled (%) |
| --- | --- | --- | --- |
| $< 1$ year of service | 25 | 20 | 10 |
| $\geq 1$ year of service | 10 | 5 | 5 |
All of the current workers have been with the company for at least one year.
### Recruitment
Each year, it is possible to hire a limited number of employees in each classification from outside the company as follows:
| Unskilled | Semi-skilled | Skilled |
| --- | --- | --- |
| 500 | 800 | 500 |
### Retraining
Each year, it is possible to train up to 200 unskilled workers to make them into semi-skilled workers. This training costs the company $\$400$ per worker.
In addition, it is possible to train semi-skilled workers to make them into skilled workers. However, this number cannot exceed 25% of the current skilled labor force, and this training costs $\$500$ per worker.
Lastly, downgrading workers to a lower skill level can be done. However, 50% of the downgraded workers will leave the company, increasing the natural attrition rate described above.
### Layoffs
Each laid-off worker is entitled to a separation payment at the rate of $\$200$ per unskilled worker and $\$500$ per semi-skilled or skilled worker.
### Excess Employees
It is possible to have workers in excess of the actual number needed, up to 150 workers in total in any given year, but this will result in the following additional cost per excess employee per year.
| Unskilled | Semi-skilled | Skilled |
| --- | --- | --- |
| $\$1500$ | $\$2000$ | $\$3000$ |
### Part-time Workers
Up to 50 employees of each skill level can be assigned to part-time work. The cost of doing so (per employee, per year) is as follows:
| Unskilled | Semi-skilled | Skilled |
| --- | --- | --- |
| $\$500$ | $\$400$ | $\$400$ |
**Note:** A part-time employee is half as productive as a full-time employee.
If the company’s objective is to minimize layoffs, what plan should they adopt in order to do this?
If their objective is to minimize costs, how much could they further reduce costs?
How can they determine the annual savings possible across each job?
---
## Model Formulation
### Sets and Indices
$t \in \text{Years}=\{1,2,3\}$: Set of years.
$s \in \text{Skills}=\{s_1: \text{unskilled},s_2: \text{semi_skilled},s_3: \text{skilled}\}$: Set of skills.
### Parameters
$\text{rookie_attrition} \in [0,1] \subset \mathbb{R}^+$: Percentage of workers who leave within the first year of service.
$\text{veteran_attrition} \in [0,1] \subset \mathbb{R}^+$: Percentage of workers who leave after the first year of service.
$\text{demoted_attrition} \in [0,1] \subset \mathbb{R}^+$: Percentage of workers who leave the company after a demotion.
$\text{parttime_cap} \in [0,1] \subset \mathbb{R}^+$: Productivity of part-time workers with respect to full-time workers.
$\text{max_train_unskilled} \in \mathbb{N}$: Maximum number of unskilled workers that can be trained on any given year.
$\text{max_train_semiskilled} \in [0,1] \subset \mathbb{R}^+$: Maximum proportion of semi-skilled workers (w.r.t. skilled ones) that can be trained on any given year.
$\text{max_parttime} \in \mathbb{N}$: Maximum number of part-time workers of each skill at any given year.
$\text{max_overmanning} \in \mathbb{N}$: Maximum number of overmanned workers at any given year.
$\text{max_hiring}_s \in \mathbb{N}$: Maximum number of workers of skill $s$ that can be hired any given year.
$\text{training_cost}_s \in \mathbb{R}^+$: Cost for training a worker of skill $s$ to the next level.
$\text{layoff_cost}_s \in \mathbb{R}^+$: Cost for laying off a worker of skill $s$.
$\text{parttime_cost}_s \in \mathbb{R}^+$: Cost for assigning a worker of skill $s$ to part-time work.
$\text{overmanning_cost}_s \in \mathbb{R}^+$: Yearly cost for having excess manpower of skill $s$.
$\text{curr_workforce}_s \in \mathbb{N}$: Current manpower of skill $s$ at the beginning of the planning horizon.
$\text{demand}_{t,s} \in \mathbb{N}$: Required manpower of skill $s$ in year $t$.
### Decision Variables
$\text{hire}_{t,s} \in [0,\text{max_hiring}_s] \subset \mathbb{R}^+$: Number of workers of skill $s$ to hire in year $t$.
$\text{part_time}_{t,s} \in [0,\text{max_parttime}] \subset \mathbb{R}^+$: Number of part-time workers of skill $s$ working in year $t$.
$\text{workforce}_{t,s} \in \mathbb{R}^+$: Number of workers of skill $s$ that are available in year $t$.
$\text{layoff}_{t,s} \in \mathbb{R}^+$: Number of workers of skill $s$ that are laid off in year $t$.
$\text{excess}_{t,s} \in \mathbb{R}^+$: Number of workers of skill $s$ that are overmanned in year $t$.
$\text{train}_{t,s,s'} \in \mathbb{R}^+$: Number of workers of skill $s$ to retrain to skill $s'$ in year $t$.
### Objective Function
- **Layoffs:** Minimize the total layoffs during the planning horizon.
\begin{equation}
\text{Minimize} \quad Z = \sum_{t \in \text{Years}}\sum_{s \in \text{Skills}}{\text{layoff}_{t,s}}
\end{equation}
- **Cost:** Minimize the total cost (in USD) incurred by training, overmanning, part-time workers, and layoffs in the planning horizon.
\begin{equation}
\text{Minimize} \quad W = \sum_{t \in \text{Years}}{\{\text{training_cost}_{s_1}*\text{train}_{t,s_1,s_2} + \text{training_cost}_{s_2}*\text{train}_{t,s_2,s_3}\}}
\end{equation}
\begin{equation}
+ \sum_{t \in \text{Years}}\sum_{s \in \text{Skills}}{\{\text{parttime_cost}_s*\text{part_time}_{t,s} + \text{layoff_cost}_s*\text{layoff}_{t,s} + \text{overmanning_cost}_s*\text{excess}_{t,s}\}}
\end{equation}
### Constraints
- **Initial Balance:** Workforce $s$ available in year $t=1$ is equal to the workforce of the previous year, recent hires, promoted and demoted workers (after accounting for attrition), minus layoffs and transferred workers.
\begin{equation}
\text{workforce}_{1,s} = (1-\text{veteran_attrition}_s)*\text{curr_workforce}_s + (1-\text{rookie_attrition}_s)*\text{hire}_{1,s}
\end{equation}
\begin{equation}
+ \sum_{s' \in \text{Skills} | s' < s}{\{(1-\text{veteran_attrition})*\text{train}_{1,s',s} - \text{train}_{1,s,s'}\}}
\end{equation}
\begin{equation}
+ \sum_{s' \in \text{Skills} | s' > s}{\{(1-\text{demoted_attrition})*\text{train}_{1,s',s} - \text{train}_{1,s,s'}\}} - \text{layoff}_{1,s} \qquad \forall s \in \text{Skills}
\end{equation}
- **Balance:** Workforce $s$ available in year $t > 1$ is equal to the workforce of the previous year, recent hires, promoted and demoted workers (after accounting for attrition), minus layoffs and transferred workers.
\begin{equation}
\text{workforce}_{t,s} = (1-\text{veteran_attrition}_s)*\text{workforce}_{t-1,s} + (1-\text{rookie_attrition}_s)*\text{hire}_{t,s}
\end{equation}
\begin{equation}
+ \sum_{s' \in \text{Skills} | s' < s}{\{(1-\text{veteran_attrition})*\text{train}_{t,s',s} - \text{train}_{t,s,s'}\}}
\end{equation}
\begin{equation}
+ \sum_{s' \in \text{Skills} | s' > s}{\{(1-\text{demoted_attrition})*\text{train}_{t,s',s} - \text{train}_{t,s,s'}\}} - \text{layoff}_{t,s} \quad \forall (t > 1,s) \in \text{Years} \times \text{Skills}
\end{equation}
- **Unskilled Training:** Unskilled workers trained in year $t$ cannot exceed the maximum allowance. Unskilled workers cannot be immediately transformed into skilled workers.
\begin{equation}
\text{train}_{t,s_1,s_2} \leq 200 \quad \forall t \in \text{Years}
\end{equation}
\begin{equation}
\text{train}_{t,s_1,s_3} = 0 \quad \forall t \in \text{Years}
\end{equation}
- **Semi-skilled Training:** Semi-skilled workers trained in year $t$ cannot exceed the maximum allowance.
\begin{equation}
\text{train}_{t,s_2,s_3} \leq 0.25*\text{workforce}_{t,s_3} \quad \forall t \in \text{Years}
\end{equation}
- **Overmanning:** Excess workers in year $t$ cannot exceed the maximum allowance.
\begin{equation}
\sum_{s \in \text{Skills}}{\text{excess}_{t,s}} \leq \text{max_overmanning} \quad \forall t \in \text{Years}
\end{equation}
- **Demand:** Workforce $s$ available in year $t$ equals the required number of workers plus the excess workers and the part-time workers.
\begin{equation}
\text{workforce}_{t,s} = \text{demand}_{t,s} + \text{excess}_{t,s} + \text{parttime_cap}*\text{part_time}_{t,s} \quad \forall (t,s) \in \text{Years} \times \text{Skills}
\end{equation}
---
## Python Implementation
We import the Gurobi Python Module and other Python libraries.
```
import gurobipy as gp
import numpy as np
import pandas as pd
from gurobipy import GRB
# tested with Python 3.7.0 & Gurobi 9.0
```
## Input Data
We define all the input data of the model.
```
# Parameters
years = [1, 2, 3]
skills = ['s1', 's2', 's3']
curr_workforce = {'s1': 2000, 's2': 1500, 's3': 1000}
demand = {
(1, 's1'): 1000,
(1, 's2'): 1400,
(1, 's3'): 1000,
(2, 's1'): 500,
(2, 's2'): 2000,
(2, 's3'): 1500,
(3, 's1'): 0,
(3, 's2'): 2500,
(3, 's3'): 2000
}
rookie_attrition = {'s1': 0.25, 's2': 0.20, 's3': 0.10}
veteran_attrition = {'s1': 0.10, 's2': 0.05, 's3': 0.05}
demoted_attrition = 0.50
max_hiring = {
(1, 's1'): 500,
(1, 's2'): 800,
(1, 's3'): 500,
(2, 's1'): 500,
(2, 's2'): 800,
(2, 's3'): 500,
(3, 's1'): 500,
(3, 's2'): 800,
(3, 's3'): 500
}
max_overmanning = 150
max_parttime = 50
parttime_cap = 0.50
max_train_unskilled = 200
max_train_semiskilled = 0.25
training_cost = {'s1': 400, 's2': 500}
layoff_cost = {'s1': 200, 's2': 500, 's3': 500}
parttime_cost = {'s1': 500, 's2': 400, 's3': 400}
overmanning_cost = {'s1': 1500, 's2': 2000, 's3': 3000}
```
## Model Deployment
We create a model and the variables. For each of the three skill levels and for each year, we will create variables for the number of workers that get recruited, transferred into part-time work, are available as workers, are redundant, or are overmanned. For each pair of skill levels and each year, we have a variable for the amount of workers that get retrained to a higher/lower skill level. The number of people who are part-time and can be recruited is limited.
```
manpower = gp.Model('Manpower planning')
hire = manpower.addVars(years, skills, ub=max_hiring, name="Hire")
part_time = manpower.addVars(years, skills, ub=max_parttime,
name="Part_time")
workforce = manpower.addVars(years, skills, name="Available")
layoff = manpower.addVars(years, skills, name="Layoff")
excess = manpower.addVars(years, skills, name="Overmanned")
train = manpower.addVars(years, skills, skills, name="Train")
```
Next, we insert the constraints. The balance constraints ensure that, for each skill level and each year, the available workforce equals the workforce of the previous year (or the current strength in the first year) plus newly recruited people and workers retrained into this level, minus workers retrained out of this level and workers who are laid off. A certain fraction of people leave the company each year, so attrition is also factored in. This constraint describes the change in the total number of employed workers.
```
#1.1 & 1.2 Balance
Balance = manpower.addConstrs(
(workforce[year, level] == (1-veteran_attrition[level])*(curr_workforce[level] if year == 1 else workforce[year-1, level])
+ (1-rookie_attrition[level])*hire[year, level] + gp.quicksum((1- veteran_attrition[level])* train[year, level2, level]
-train[year, level, level2] for level2 in skills if level2 < level)
+ gp.quicksum((1- demoted_attrition)* train[year, level2, level] -train[year, level, level2] for level2 in skills if level2 > level)
- layoff[year, level] for year in years for level in skills), "Balance")
```
The Unskilled training constraints force that per year only 200 workers can be retrained from Unskilled to Semi-skilled due to capacity limitations. Also, no one can be trained in one year from Unskilled to Skilled.
```
#2.1 & 2.2 Unskilled training
UnskilledTrain1 = manpower.addConstrs((train[year, 's1', 's2'] <= max_train_unskilled for year in years), "Unskilled_training1")
UnskilledTrain2 = manpower.addConstrs((train[year, 's1', 's3'] == 0 for year in years), "Unskilled_training2")
```
The semi-skilled training constraint states that the retraining of semi-skilled workers to skilled workers is limited to no more than one quarter of the skilled labor force at that time. This is due to capacity limitations.
```
#3. Semi-skilled training
SemiskilledTrain = manpower.addConstrs((train[year,'s2', 's3'] <= max_train_semiskilled * workforce[year,'s3'] for year in years), "Semiskilled_training")
```
The overmanning constraints ensure that the total overmanning over all skill levels in one year is no more than 150.
```
#4. Overmanning
Overmanning = manpower.addConstrs((excess.sum(year, '*') <= max_overmanning for year in years), "Overmanning")
```
The demand constraints ensure that the number of workers of each level and year equals the required number of workers plus the Overmanned workers and the number of workers who are working part-time.
```
#5. Demand
Demand = manpower.addConstrs((workforce[year, level] ==
demand[year,level] + excess[year, level] + parttime_cap * part_time[year, level]
for year in years for level in skills), "Requirements")
```
The first objective is to minimize the total number of laid off workers. This can be stated as:
```
#0.1 Objective Function: Minimize layoffs
obj1 = layoff.sum()
manpower.setObjective(obj1, GRB.MINIMIZE)
```
The second alternative objective is to minimize the total cost of all employed workers and costs for retraining:
```
obj2 = gp.quicksum((training_cost[level]*train[year, level, skills[skills.index(level)+1]] if level < 's3' else 0)
+ layoff_cost[level]*layoff[year, level]
+ parttime_cost[level]*part_time[year, level]
+ overmanning_cost[level] * excess[year, level] for year in years for level in skills)
```
Next we start the optimization with the objective function of minimizing layoffs, and Gurobi finds the optimal solution.
```
manpower.optimize()
```
## Analysis
The minimum number of layoffs is 841.80. The optimal policies to achieve this minimum number of layoffs are given below.
### Hiring Plan
This plan determines the number of new workers to hire at each year of the planning horizon (rows) and each skill level (columns). For example, at year 2 we are going to hire 649.3 Semi-skilled workers.
```
rows = years.copy()
columns = skills.copy()
hire_plan = pd.DataFrame(columns=columns, index=rows, data=0.0)
for year, level in hire.keys():
if (abs(hire[year, level].x) > 1e-6):
hire_plan.loc[year, level] = np.round(hire[year, level].x, 1)
hire_plan
```
### Training and Demotions Plan
This plan defines the number of workers to promote by training (or demote) at each year of the planning horizon. For example, in year 1 we are going to demote 168.4 skilled (s3) workers to the level of semi-skilled (s2).
```
rows = years.copy()
columns = ['{0} to {1}'.format(level1, level2) for level1 in skills for level2 in skills if level1 != level2]
train_plan = pd.DataFrame(columns=columns, index=rows, data=0.0)
for year, level1, level2 in train.keys():
col = '{0} to {1}'.format(level1, level2)
if (abs(train[year, level1, level2].x) > 1e-6):
train_plan.loc[year, col] = np.round(train[year, level1, level2].x, 1)
train_plan
```
### Layoffs Plan
This plan determines the number of workers to layoff of each skill level at each year of the planning horizon. For example, we are going to layoff 232.5 Unskilled workers in year 3.
```
rows = years.copy()
columns = skills.copy()
layoff_plan = pd.DataFrame(columns=columns, index=rows, data=0.0)
for year, level in layoff.keys():
if (abs(layoff[year, level].x) > 1e-6):
layoff_plan.loc[year, level] = np.round(layoff[year, level].x, 1)
layoff_plan
```
### Part-time Plan
This plan defines the number of part-time workers of each skill level working at each year of the planning horizon. For example, in year 1, we have 50 part-time skilled workers.
```
rows = years.copy()
columns = skills.copy()
parttime_plan = pd.DataFrame(columns=columns, index=rows, data=0.0)
for year, level in part_time.keys():
if (abs(part_time[year, level].x) > 1e-6):
parttime_plan.loc[year, level] = np.round(part_time[year, level].x, 1)
parttime_plan
```
### Overmanning Plan
This plan determines the number of excess workers of each skill level working at each year of the planning horizon. For example, we have 150 Unskilled excess workers in year 3.
```
rows = years.copy()
columns = skills.copy()
excess_plan = pd.DataFrame(columns=columns, index=rows, data=0.0)
for year, level in excess.keys():
if (abs(excess[year, level].x) > 1e-6):
excess_plan.loc[year, level] = np.round(excess[year, level].x, 1)
excess_plan
```
By minimizing the cost instead, we could implement policies that would cost $\$498,677.29$ over the three-year period and result in 1,423.7 layoffs. Alternative optimal solutions could be considered to reduce layoffs without increasing cost. If we minimize costs instead of layoffs, we can save $\$942,712.51$ at the expense of 581.9 additional layoffs. Thus, the cost of saving each job, when minimizing layoffs, could be regarded as $\$1,620.06$.
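The notebook above only runs the optimization for the layoff objective. As a rough sketch (not part of the original notebook), the cost-minimizing policy discussed here could be reproduced by switching to the second objective defined earlier and re-optimizing:
```
# Sketch: switch to the cost objective (obj2 defined above) and re-optimize
manpower.setObjective(obj2, GRB.MINIMIZE)
manpower.optimize()
print('Minimum total cost: {:.2f}'.format(manpower.ObjVal))
```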
**Note:** If you want to write your solution to a file, rather than print it to the terminal, you can use the model.write() command. An example implementation is:
`manpower.write("manpower-planning-output.sol")`
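In the same way, the model formulation itself can be exported by choosing a model-format file extension; as a sketch (not shown in the original notebook), writing an LP file would look like:
`manpower.write("manpower-planning-model.lp")`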
---
## References
H. Paul Williams, Model Building in Mathematical Programming, fifth edition.
Copyright © 2020 Gurobi Optimization, LLC
```
%matplotlib inline
%reload_ext autoreload
%autoreload 2
from ipyexperiments import *
from lib.fastai.imports import *
from lib.fastai.structured import *
import pandas as pd
import numpy as np
import lightgbm as lgb
from scipy.sparse import vstack, csr_matrix, save_npz, load_npz
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import StratifiedKFold
from datetime import datetime
from path import Path
import re2 as re
import joblib
## Dainis's work
def display_n(df, n=250):
with pd.option_context("display.max_rows", n):
with pd.option_context("display.max_columns", n):
display(df)
def add_datepart(df, fldname, drop=False, time=False):
"Helper function that adds columns relevant to a date."
fld = df[fldname]
fld_dtype = fld.dtype
if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
fld_dtype = np.datetime64
if not np.issubdtype(fld_dtype, np.datetime64):
df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True)
targ_pre = re.sub('[Dd]ate$', '', fldname)
attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']
if time: attr = attr + ['Hour', 'Minute', 'Second']
for n in attr: df[targ_pre + n] = getattr(fld.dt, n.lower())
df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9
if drop: df.drop(fldname, axis=1, inplace=True)
## Pietro and Wojtek work
def add_timestamps(df):
"Funection that loads time values from numpy files"
datedictAS = np.load('dates/AvSigVersionTimestamps.npy')[()]
df['DateAS'] = df['AvSigVersion'].map(datedictAS)
datedictOS = np.load('dates/OSVersionTimestamps.npy')[()]
df['DateOS'] = df['Census_OSVersion'].map(datedictOS)
# BL timestamp
def convert(x):
try:
d = datetime.strptime(x.split('.')[4],'%y%m%d-%H%M')
except:
d = np.nan
return d
df['DateBL'] = df['OsBuildLab'].map(convert)
dtypes = {
'MachineIdentifier': 'category',
'ProductName': 'category',
'EngineVersion': 'category',
'AppVersion': 'category',
'AvSigVersion': 'category',
'IsBeta': 'int8',
'RtpStateBitfield': 'float16',
'IsSxsPassiveMode': 'int8',
'DefaultBrowsersIdentifier': 'float16',
'AVProductStatesIdentifier': 'float32',
'AVProductsInstalled': 'float16',
'AVProductsEnabled': 'float16',
'HasTpm': 'int8',
'CountryIdentifier': 'int16',
'CityIdentifier': 'float32',
'OrganizationIdentifier': 'float16',
'GeoNameIdentifier': 'float16',
'LocaleEnglishNameIdentifier': 'int8',
'Platform': 'category',
'Processor': 'category',
'OsVer': 'category',
'OsBuild': 'int16',
'OsSuite': 'int16',
'OsPlatformSubRelease': 'category',
'OsBuildLab': 'category',
'SkuEdition': 'category',
'IsProtected': 'float16',
'AutoSampleOptIn': 'int8',
'PuaMode': 'category',
'SMode': 'float16',
'IeVerIdentifier': 'float16',
'SmartScreen': 'category',
'Firewall': 'float16',
'UacLuaenable': 'float32',
'Census_MDC2FormFactor': 'category',
'Census_DeviceFamily': 'category',
'Census_OEMNameIdentifier': 'float16',
'Census_OEMModelIdentifier': 'float32',
'Census_ProcessorCoreCount': 'float16',
'Census_ProcessorManufacturerIdentifier': 'float16',
'Census_ProcessorModelIdentifier': 'float16',
'Census_ProcessorClass': 'category',
'Census_PrimaryDiskTotalCapacity': 'float32',
'Census_PrimaryDiskTypeName': 'category',
'Census_SystemVolumeTotalCapacity': 'float32',
'Census_HasOpticalDiskDrive': 'int8',
'Census_TotalPhysicalRAM': 'float32',
'Census_ChassisTypeName': 'category',
'Census_InternalPrimaryDiagonalDisplaySizeInInches': 'float16',
'Census_InternalPrimaryDisplayResolutionHorizontal': 'float16',
'Census_InternalPrimaryDisplayResolutionVertical': 'float16',
'Census_PowerPlatformRoleName': 'category',
'Census_InternalBatteryType': 'category',
'Census_InternalBatteryNumberOfCharges': 'float32',
'Census_OSVersion': 'category',
'Census_OSArchitecture': 'category',
'Census_OSBranch': 'category',
'Census_OSBuildNumber': 'int16',
'Census_OSBuildRevision': 'int32',
'Census_OSEdition': 'category',
'Census_OSSkuName': 'category',
'Census_OSInstallTypeName': 'category',
'Census_OSInstallLanguageIdentifier': 'float16',
'Census_OSUILocaleIdentifier': 'int16',
'Census_OSWUAutoUpdateOptionsName': 'category',
'Census_IsPortableOperatingSystem': 'int8',
'Census_GenuineStateName': 'category',
'Census_ActivationChannel': 'category',
'Census_IsFlightingInternal': 'float16',
'Census_IsFlightsDisabled': 'float16',
'Census_FlightRing': 'category',
'Census_ThresholdOptIn': 'float16',
'Census_FirmwareManufacturerIdentifier': 'float16',
'Census_FirmwareVersionIdentifier': 'float32',
'Census_IsSecureBootEnabled': 'int8',
'Census_IsWIMBootEnabled': 'float16',
'Census_IsVirtualDevice': 'float16',
'Census_IsTouchEnabled': 'int8',
'Census_IsPenCapable': 'int8',
'Census_IsAlwaysOnAlwaysConnectedCapable': 'float16',
'Wdft_IsGamer': 'float16',
'Wdft_RegionIdentifier': 'float16',
'HasDetections': 'int8'
}
# Uncomment the following block on the first run
'''
with IPyExperimentsCPU():
print('Download Train and Test Data.\n')
# Pietro, uncomment the following line and comment out the next one
# INPUT_DIR = Path('E:/malware_microsoft' )
INPUT_DIR = Path('./input' )
train = pd.read_csv(Path(INPUT_DIR / 'train.csv'), dtype=dtypes, low_memory=True)
train['MachineIdentifier'] = train.index.astype('uint32')
test = pd.read_csv(Path(INPUT_DIR /'test.csv'), dtype=dtypes, low_memory=True)
test['MachineIdentifier'] = test.index.astype('uint32')
add_timestamps(train)
add_timestamps(test)
joblib.dump(train, 'data/train_w_time_origin.pkl')
joblib.dump(test, 'data/test_w_time_origin.pkl')
'''
def versioning(df, fldname, drop=False):
"Helper function that adds columns relevant to a date."
versions = df[fldname].str.split('.', expand=True)
for i, v in enumerate(versions):
df[fldname+'V'+str(i)] = versions[v]
if drop: df.drop(fldname, axis=1, inplace=True)
def versioning(df, fldname, categorical_vars=None, drop=False):
    "Helper function that splits a version string into per-component columns."
    # Split version strings such as '1.2.3' on '.' into one column per component
    versions = df[fldname].str.split('.', expand=True)
    for i, v in enumerate(versions):
        newfld = fldname+'V'+str(i)
        df[newfld] = versions[v]
        if categorical_vars is not None:
            categorical_vars.append(newfld)
    if drop: df.drop(fldname, axis=1, inplace=True)
with IPyExperimentsCPU() as preprocess:
categorical_vars = [
'MachineIdentifier',
'ProductName',
'EngineVersion',
'AppVersion',
'AvSigVersion',
'Platform',
'Processor',
'OsVer',
'OsPlatformSubRelease',
'OsBuildLab',
'SkuEdition',
'PuaMode',
'SmartScreen',
'Census_MDC2FormFactor',
'Census_DeviceFamily',
'Census_ProcessorClass',
'Census_PrimaryDiskTypeName',
'Census_ChassisTypeName',
'Census_PowerPlatformRoleName',
'Census_InternalBatteryType',
'Census_OSVersion',
'Census_OSArchitecture',
'Census_OSBranch',
'Census_OSEdition',
'Census_OSSkuName',
'Census_OSInstallTypeName',
'Census_OSWUAutoUpdateOptionsName',
'Census_GenuineStateName',
'Census_ActivationChannel',
'Census_FlightRing',
]
train=joblib.load('data/train_w_time_origin.pkl')
test=joblib.load('data/test_w_time_origin.pkl')
test['HasDetections'] = -1
add_datepart(train, 'DateAS', drop=False, time=True)
add_datepart(train, 'DateOS', drop=False, time=True)
add_datepart(train, 'DateBL', drop=False, time=True)
add_datepart(test, 'DateAS', drop=False, time=True)
add_datepart(test, 'DateOS', drop=False, time=True)
add_datepart(test, 'DateBL', drop=False, time=True)
preprocess.keep_var_names('train', 'test', 'categorical_vars')
joblib.dump(categorical_vars, 'val/categorical.pkl')
with pd.option_context("display.max_rows", 100):
with pd.option_context("display.max_columns", 100):
display(train[categorical_vars].head())
versioned = ['EngineVersion','AppVersion','AvSigVersion','OsVer','Census_OSVersion','OsBuildLab']
with IPyExperimentsCPU() as vsplits:
for ver in versioned:
versioning(train, ver)
versioning(test, ver)
df_raw = pd.concat([train, test], sort=False)
train_cats(df_raw)
df, y, nas = proc_df(df_raw)
train = df.head(len(train)).reset_index(drop=True)
test = df.tail(len(test)).reset_index(drop=True)
joblib.dump(train,'data/train_dainis.pkl')
joblib.dump(test,'data/test_dainis.pkl')
with IPyExperimentsCPU() as transform:
'''
print('Transform all features to category.\n')
for i, usecol in enumerate(categorical_vars):
print(str(i) + " / " + str(len(categorical_vars)))
train[usecol] = train[usecol].astype('str')
test[usecol] = test[usecol].astype('str')
train[usecol] = train[usecol].astype('str')
test[usecol] = test[usecol].astype('str')
#Fit LabelEncoder
le = LabelEncoder().fit(
np.unique(train[usecol].unique().tolist()+
test[usecol].unique().tolist()))
#At the end 0 will be used for dropped values
train[usecol] = le.transform(train[usecol])+1
test[usecol] = le.transform(test[usecol])+1
agg_tr = (train
.groupby([usecol])
.aggregate({'MachineIdentifier':'count'})
.reset_index()
.rename({'MachineIdentifier':'Train'}, axis=1))
agg_te = (test
.groupby([usecol])
.aggregate({'MachineIdentifier':'count'})
.reset_index()
.rename({'MachineIdentifier':'Test'}, axis=1))
agg = pd.merge(agg_tr, agg_te, on=usecol, how='outer').replace(np.nan, 0)
#Select values with more than 1000 observations
agg = agg[(agg['Train'] > 1000)].reset_index(drop=True)
agg['Total'] = agg['Train'] + agg['Test']
#Drop unbalanced values
agg = agg[(agg['Train'] / agg['Total'] > 0.2) & (agg['Train'] / agg['Total'] < 0.8)]
agg[usecol+'Copy'] = agg[usecol]
train[usecol+'bis'] = (pd.merge(train[[usecol]],
agg[[usecol, usecol+'Copy']],
on=usecol, how='left')[usecol+'Copy']
.replace(np.nan, 0).astype('int').astype('category'))
test[usecol+'bis'] = (pd.merge(test[[usecol]],
agg[[usecol, usecol+'Copy']],
on=usecol, how='left')[usecol+'Copy']
.replace(np.nan, 0).astype('int').astype('category'))
del le, agg_tr, agg_te, agg, usecol
'''
EXP_TAG=Path('dainis0')
train_ids = train.index
test_ids = test.index
y_train = np.array(train['HasDetections'])
# Fulfill contract with evaluator notebook
joblib.dump(categorical_vars, Path('val' / EXP_TAG / 'categorical.pkl'))
joblib.dump(train, Path('val' / EXP_TAG / 'train-original.pkl'))
    joblib.dump(test, Path('val' / EXP_TAG / 'test-original.pkl'))
joblib.dump(y_train, Path('val' / EXP_TAG / 'y_train-original.pkl'))
joblib.dump(train_ids,Path( 'val' / EXP_TAG / 'train_ids-original.pkl'))
joblib.dump(test_ids, Path('val' / EXP_TAG / 'test_ids-original.pkl'))
```
```
#Goal: obtain a universal time, in Julian Date from a local time in the header of the fits images
from astropy.io import fits #work with fits images
from astropy.time import Time #work with time in header
import glob #work with files in the directory
import yaml #work with yaml files
import numpy as np
import sys
import os
%matplotlib inline
import matplotlib.pyplot as plt #plot library
def init_plotting():
plt.rcParams['figure.figsize'] = (14.0,8.0)
plt.rcParams['font.size'] = 10
#plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['axes.labelsize'] = plt.rcParams['font.size']
plt.rcParams['axes.titlesize'] = 2*plt.rcParams['font.size']
plt.rcParams['legend.fontsize'] = 0.65*plt.rcParams['font.size']
plt.rcParams['xtick.labelsize'] = plt.rcParams['font.size']
plt.rcParams['ytick.labelsize'] = plt.rcParams['font.size']
plt.rcParams['xtick.major.size'] = 3
plt.rcParams['xtick.minor.size'] = 3
plt.rcParams['xtick.major.width'] = 1
plt.rcParams['xtick.minor.width'] = 1
plt.rcParams['ytick.major.size'] = 3
plt.rcParams['ytick.minor.size'] = 3
plt.rcParams['ytick.major.width'] = 1
plt.rcParams['ytick.minor.width'] = 1
plt.rcParams['legend.frameon'] = True
plt.rcParams['legend.loc'] = 'best'
plt.rcParams['axes.linewidth'] = 1
init_plotting()
#BAR Progress function to visualize the progress status:
def update_progress(progress):
"""
Progress Bar to visualize the status of a procedure
___
INPUT:
progress: percent of the data
___
Example:
print ""
print "progress : 0->1"
for i in range(100):
time.sleep(0.1)
update_progress(i/100.0)
"""
barLength = 10 # Modify this to change the length of the progress bar
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength*progress))
text = "\rPercent: [{0}] {1}% {2}".format( "#"*block + "-"*(barLength-block), progress*100, status)
sys.stdout.write(text)
sys.stdout.flush()
save_path = u'C:\\Users\\walte\\Desktop\\exoplanet\\data\\xo2b\\xo2b.b\\teste_pyraf'
data_path = u'C:\\Users\\walte\\Desktop\\exoplanet\\data\\xo2b\\xo2b.b'
pwd
cd C:/Users/walte/Desktop/exoplanet/data/xo2b/xo2b.b/teste_pyraf/
images = glob.glob('ABxo2b*.fits')
print images
print len(images)
im,hdr = fits.getdata(images[0],header=True) #reading the fits image (data + header)
hdr
```
# Local Time
```
hdr['LOCTIME'] #local time at start of exposure in header
images_time = []
for i in range(len(images)):
im,hdr = fits.getdata(images[i],header=True) #reading the fits image (data + header)
images_time.append(hdr['LOCTIME'])
update_progress((i+1.)/len(images))
print images_time #our local time series
```
# FITS Time
```
fits_time = []
for i in range(len(images)):
im,hdr = fits.getdata(images[i],header=True) #reading the fits image (data + header)
fits_time.append(hdr['DATE'])
update_progress((i+1.)/len(images))
print fits_time
```
# Observatory (location)
```
#geting the observatory
im,hdr = fits.getdata(images[0],header=True) #reading the fits image (data + header)
observatory_loc = hdr['OBSERVAT']
print observatory_loc
```
# Obtain UT using local time and observatory
```
#time formats
print list(Time.FORMATS)
#Let's use the fits time format
teste = Time(fits_time[0],format=u'fits')
teste
teste.jd #convert my object test in fits date to julian date
#Let's apply the conversion to the whole time series
serie = np.zeros(len(fits_time))
for i in range(len(fits_time)):
serie[i] = Time(fits_time[i],format=u'fits').jd
serie
#Let's confirm our serie
hjd = np.loadtxt('../Results/hjd') #original data
hjd
```
# Error 404: Date not found!
Yes, and I know why! The DATE in the abxo2b*.fits images is just the date when those files were created. Because of that, we need to extract the date from the original images!
```
os.chdir('../')
images = glob.glob('xo2b*.fits')
os.chdir(save_path)
print images
fits_time = []
os.chdir(data_path)
for i in range(len(images)):
im,hdr = fits.getdata(images[i],header=True) #reading the fits image (data + header)
fits_time.append(hdr['DATE'])
update_progress((i+1.)/len(images))
os.chdir(save_path)
print fits_time
#Let's apply the conversion to the whole time series
serie = np.zeros(len(fits_time))
for i in range(len(fits_time)):
serie[i] = Time(fits_time[i],format=u'fits').jd
serie
hjd
diff = serie-hjd
plt.figure()
plt.grid()
plt.scatter(hjd,diff)
plt.ylim(min(diff),max(diff))
im,hdr = fits.getdata('../'+images[0],header=True)
hdr
hdr['LOCTIME'],hdr['DATE-OBS']
tempo_imagem = hdr['DATE-OBS']+' '+hdr['LOCTIME']
print tempo_imagem
teste = Time(tempo_imagem,format=u'iso')
teste.jd #Nope
hjd[0]
#****** change time
hdr['UT']
location = '+32:24:59.3 110:44:04.3'
teste = Time(hdr['DATE-OBS']+'T'+hdr['UT'],format='isot',scale='utc')
teste
teste.jd
hjd[0]
hdr['']
```
# Working with date in header following Kyle's subroutine stcoox.cl in ExoDRPL
```
import yaml
file = yaml.load(open('C:/Users/walte/MEGA/work/codes/iraf_task/input_path.yaml'))
RA,DEC, epoch = file['RA'],file['DEC'],file['epoch']
print RA,DEC,epoch
hdr['DATE-OBS'], hdr['UT']
local_time = Time(hdr['DATE-OBS']+'T'+hdr['ut'],format='isot')
print local_time.jd
teste_loc_time = Time('2012-12-09'+'T'+hdr['ut'],format='isot')
print teste_loc_time.jd
hdr['DATE']
Time(hdr['DATE'],format='fits',scale='tai')
hjd[0]
Time(hdr['DATE'],format='fits',scale='tai').jd2000
hdr
import datetime
hdr['DATE-OBS'],hdr['DATE'],hdr['LOCTIME'],hdr['TIME-OBS'],hdr['TIMESYS']
Time(hdr['DATE'],format='fits',scale='utc')
print Time(hdr['DATE'],scale='utc',format='isot').jd
print Time(hdr['DATE-OBS']+'T'+hdr['TIME-OBS'],scale='utc',format='isot').jd
hjd[0], len(hjd)
hdr['UTC-OBS']
Time(hdr['IRAF-TLM'],scale='utc',format='isot').jd
diff = (Time(hdr['IRAF-TLM'],scale='utc',format='isot').jd - Time(hdr['DATE'],scale='utc',format='isot').jd)/2
print diff
print Time(hdr['IRAF-TLM'],scale='utc',format='isot').jd - diff
```
# Local Time to sideral time
```
local_time = Time(hdr['DATE-OBS']+'T'+hdr['Time-obs'],format='isot',scale='utc')
time_sd = local_time.sidereal_time('apparent',longitude=file['lon-obs'])#with precession and nutation
print time_sd
time_sd.T.hms[0],time_sd.T.hms[1],time_sd.T.hms[2]
local_time.sidereal_time('mean',longitude=file['lon-obs']) #with precession
file['observatory'],file['lon-obs']
time_sd.deg, time_sd.hour
```
# Change degrees to hours...
```
from astropy.coordinates import SkyCoord
from astropy import units as unit
from astropy.coordinates import Angle
RA = Angle(file['RA']+file['u.RA'])
DEC = Angle(file['DEC']+file['u.DEC'])
coordenadas = SkyCoord(RA,DEC,frame='fk5')
coordenadas
coordenadas.ra.hour, coordenadas.dec.deg,coordenadas.equinox,coordenadas.equinox.value
local_time
local_time.hjd
#airmass
airmass = np.loadtxt('../Results/XYpos+Airmass.txt',unpack=True)
airmass[2]
hdr['DATE-OBS'],hdr['UTC-OBS']
file['time-zone'] = 7
file['time-zone']
local_time
import string
hdr['DATE-OBS'].split('-')
float(hdr['DATE-OBS'].split('-')[2])
hdr['UTC-OBS'].split(':'),hdr['UTC-OBS'].split(':')[0]
if float(hdr['UTC-OBS'].split(':')[0]) < file['time-zone']:
new_date = float(hdr['DATE-OBS'].split('-')[2]) - 1
hdr['DATE-OBS'] = hdr['DATE-OBS'].split('-')[0]+'-'+hdr['DATE-OBS'].split('-')[1]+'-'+str(int(new_date))
new_date
hdr['DATE-OBS']
```
**This notebook is an exercise in the [Geospatial Analysis](https://www.kaggle.com/learn/geospatial-analysis) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/interactive-maps).**
---
# Introduction
You are an urban safety planner in Japan, and you are analyzing which areas of Japan need extra earthquake reinforcement. Which areas are both high in population density and prone to earthquakes?
<center>
<img src="https://i.imgur.com/Kuh9gPj.png" width="450"><br/>
</center>
Before you get started, run the code cell below to set everything up.
```
import pandas as pd
import geopandas as gpd
import folium
from folium import Choropleth
from folium.plugins import HeatMap
from learntools.core import binder
binder.bind(globals())
from learntools.geospatial.ex3 import *
```
We define a function `embed_map()` for displaying interactive maps. It accepts two arguments: the variable containing the map, and the name of the HTML file where the map will be saved.
This function ensures that the maps are visible [in all web browsers](https://github.com/python-visualization/folium/issues/812).
```
def embed_map(m, file_name):
from IPython.display import IFrame
m.save(file_name)
return IFrame(file_name, width='100%', height='500px')
```
# Exercises
### 1) Do earthquakes coincide with plate boundaries?
Run the code cell below to create a DataFrame `plate_boundaries` that shows global plate boundaries. The "coordinates" column is a list of (latitude, longitude) locations along the boundaries.
```
plate_boundaries = gpd.read_file("../input/geospatial-learn-course-data/Plate_Boundaries/Plate_Boundaries/Plate_Boundaries.shp")
plate_boundaries['coordinates'] = plate_boundaries.apply(lambda x: [(b,a) for (a,b) in list(x.geometry.coords)], axis='columns')
plate_boundaries.drop('geometry', axis=1, inplace=True)
plate_boundaries.head()
```
Next, run the code cell below without changes to load the historical earthquake data into a DataFrame `earthquakes`.
```
# Load the data and print the first 5 rows
earthquakes = pd.read_csv("../input/geospatial-learn-course-data/earthquakes1970-2014.csv", parse_dates=["DateTime"])
earthquakes.head()
```
The code cell below visualizes the plate boundaries on a map. Use all of the earthquake data to add a heatmap to the same map, to determine whether earthquakes coincide with plate boundaries.
```
# Create a base map with plate boundaries
m_1 = folium.Map(location=[35,136], tiles='cartodbpositron', zoom_start=5)
for i in range(len(plate_boundaries)):
folium.PolyLine(locations=plate_boundaries.coordinates.iloc[i], weight=2, color='black').add_to(m_1)
# Your code here: Add a heatmap to the map
HeatMap(data=earthquakes[['Latitude', 'Longitude']], radius=10).add_to(m_1)
# Uncomment to see a hint
#q_1.a.hint()
# Show the map
embed_map(m_1, 'q_1.html')
# Get credit for your work after you have created a map
q_1.a.check()
# Uncomment to see our solution (your code may look different!)
q_1.a.solution()
```
So, given the map above, do earthquakes coincide with plate boundaries?
```
# View the solution (Run this code cell to receive credit!)
q_1.b.solution()
```
### 2) Is there a relationship between earthquake depth and proximity to a plate boundary in Japan?
You recently read that the depth of earthquakes tells us [important information](https://www.usgs.gov/faqs/what-depth-do-earthquakes-occur-what-significance-depth?qt-news_science_products=0#qt-news_science_products) about the structure of the earth. You're interested to see if there are any interesting global patterns, and you'd also like to understand how depth varies in Japan.
```
# Create a base map with plate boundaries
m_2 = folium.Map(location=[35,136], tiles='cartodbpositron', zoom_start=5)
for i in range(len(plate_boundaries)):
folium.PolyLine(locations=plate_boundaries.coordinates.iloc[i], weight=2, color='black').add_to(m_2)
# Your code here: Add a map to visualize earthquake depth
# Custom function to assign a color to each circle
def color_producer(val):
if val < 50:
return 'forestgreen'
elif val < 100:
return 'darkorange'
else:
return 'darkred'
# Add a map to visualize earthquake depth
for i in range(0,len(earthquakes)):
folium.Circle(
location=[earthquakes.iloc[i]['Latitude'], earthquakes.iloc[i]['Longitude']],
radius=2000,
color=color_producer(earthquakes.iloc[i]['Depth'])).add_to(m_2)
# Uncomment to see a hint
#q_2.a.hint()
# View the map
embed_map(m_2, 'q_2.html')
# Get credit for your work after you have created a map
q_2.a.check()
# Uncomment to see our solution (your code may look different!)
q_2.a.solution()
```
Can you detect a relationship between proximity to a plate boundary and earthquake depth? Does this pattern hold globally? In Japan?
```
# View the solution (Run this code cell to receive credit!)
q_2.b.solution()
```
### 3) Which prefectures have high population density?
Run the next code cell (without changes) to create a GeoDataFrame `prefectures` that contains the geographical boundaries of Japanese prefectures.
```
# GeoDataFrame with prefecture boundaries
prefectures = gpd.read_file("../input/geospatial-learn-course-data/japan-prefecture-boundaries/japan-prefecture-boundaries/japan-prefecture-boundaries.shp")
prefectures.set_index('prefecture', inplace=True)
prefectures.head()
```
The next code cell creates a DataFrame `stats` containing the population, area (in square kilometers), and population density (per square kilometer) for each Japanese prefecture. Run the code cell without changes.
```
# DataFrame containing population of each prefecture
population = pd.read_csv("../input/geospatial-learn-course-data/japan-prefecture-population.csv")
population.set_index('prefecture', inplace=True)
# Calculate area (in square kilometers) of each prefecture
area_sqkm = pd.Series(prefectures.geometry.to_crs(epsg=32654).area / 10**6, name='area_sqkm')
stats = population.join(area_sqkm)
# Add density (per square kilometer) of each prefecture
stats['density'] = stats["population"] / stats["area_sqkm"]
stats.head()
```
Use the next code cell to create a choropleth map to visualize population density.
```
# Create a base map
m_3 = folium.Map(location=[35,136], tiles='cartodbpositron', zoom_start=5)
# Your code here: create a choropleth map to visualize population density
Choropleth(geo_data=prefectures['geometry'].__geo_interface__,
data=stats['density'],
key_on="feature.id",
fill_color='YlGnBu',
legend_name='Population density (per square kilometer)'
).add_to(m_3)
# Uncomment to see a hint
# q_3.a.hint()
# View the map
embed_map(m_3, 'q_3.html')
# Get credit for your work after you have created a map
q_3.a.check()
# Uncomment to see our solution (your code may look different!)
q_3.a.solution()
```
Which three prefectures have relatively higher density than the others? Are they spread throughout the country, or all located in roughly the same geographical region? (*If you're unfamiliar with Japanese geography, you might find [this map](https://en.wikipedia.org/wiki/Prefectures_of_Japan) useful to answer the questions.*)
```
# View the solution (Run this code cell to receive credit!)
q_3.b.solution()
```
### 4) Which high-density prefecture is prone to high-magnitude earthquakes?
Create a map to suggest one prefecture that might benefit from earthquake reinforcement. Your map should visualize both density and earthquake magnitude.
```
# Create a base map
m_4 = folium.Map(location=[35,136], tiles='cartodbpositron', zoom_start=5)
# Your code here: create a map
def color_producer(magnitude):
if magnitude > 6.5:
return 'red'
else:
return 'green'
Choropleth(
geo_data=prefectures['geometry'].__geo_interface__,
data=stats['density'],
key_on="feature.id",
fill_color='BuPu',
legend_name='Population density (per square kilometer)').add_to(m_4)
for i in range(0,len(earthquakes)):
folium.Circle(
location=[earthquakes.iloc[i]['Latitude'], earthquakes.iloc[i]['Longitude']],
popup=("{} ({})").format(
earthquakes.iloc[i]['Magnitude'],
earthquakes.iloc[i]['DateTime'].year),
radius=earthquakes.iloc[i]['Magnitude']**5.5,
color=color_producer(earthquakes.iloc[i]['Magnitude'])).add_to(m_4)
# Uncomment to see a hint
q_4.a.hint()
# View the map
embed_map(m_4, 'q_4.html')
# Get credit for your work after you have created a map
q_4.a.check()
# Uncomment to see our solution (your code may look different!)
q_4.a.solution()
```
Which prefecture do you recommend for extra earthquake reinforcement?
```
# View the solution (Run this code cell to receive credit!)
q_4.b.solution()
```
# Keep going
Learn how to convert names of places to geographic coordinates with **[geocoding](https://www.kaggle.com/alexisbcook/manipulating-geospatial-data)**. You'll also explore special ways to join information from multiple GeoDataFrames.
---
*Have questions or comments? Visit the [course discussion forum](https://www.kaggle.com/learn/geospatial-analysis/discussion) to chat with other learners.*
# Classification with Neural Network for Yoga poses detection
## Import Dependencies
```
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import classification_report, log_loss, accuracy_score
from sklearn.model_selection import train_test_split
```
## Getting the data (images) and labels
```
# Data path
train_dir = 'pose_recognition_data/dataset'
# Getting the folders name to be able to labelize the data
Name=[]
for file in os.listdir(train_dir):
Name+=[file]
print(Name)
print(len(Name))
N=[]
for i in range(len(Name)):
N+=[i]
normal_mapping=dict(zip(Name,N))
reverse_mapping=dict(zip(N,Name))
def mapper(value):
return reverse_mapping[value]
dataset=[]
testset=[]
count=0
for file in os.listdir(train_dir):
t=0
path=os.path.join(train_dir,file)
for im in os.listdir(path):
image=load_img(os.path.join(path,im), grayscale=False, color_mode='rgb', target_size=(40,40))
image=img_to_array(image)
image=image/255.0
if t<60:
dataset+=[[image,count]]
else:
testset+=[[image,count]]
t+=1
count=count+1
data,labels0=zip(*dataset)
test,testlabels0=zip(*testset)
labels1=to_categorical(labels0)
labels=np.array(labels1)
# Transforming the data into NumPy arrays
data=np.array(data)
test=np.array(test)
trainx,testx,trainy,testy=train_test_split(data,labels,test_size=0.2,random_state=44)
print(trainx.shape)
print(testx.shape)
print(trainy.shape)
print(testy.shape)
# Data augmentation
datagen = ImageDataGenerator(horizontal_flip=True,vertical_flip=True,rotation_range=20,zoom_range=0.2,
width_shift_range=0.2,height_shift_range=0.2,shear_range=0.1,fill_mode="nearest")
# Loading the pretrained model , here DenseNet201
pretrained_model3 = tf.keras.applications.DenseNet201(input_shape=(40,40,3),include_top=False,weights='imagenet',pooling='avg')
pretrained_model3.trainable = False
inputs3 = pretrained_model3.input
x3 = tf.keras.layers.Dense(128, activation='relu')(pretrained_model3.output)
outputs3 = tf.keras.layers.Dense(107, activation='softmax')(x3)
model = tf.keras.Model(inputs=inputs3, outputs=outputs3)
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
his=model.fit(datagen.flow(trainx,trainy,batch_size=32),validation_data=(testx,testy),epochs=50)
y_pred=model.predict(testx)
pred=np.argmax(y_pred,axis=1)
ground = np.argmax(testy,axis=1)
print(classification_report(ground,pred))
#Checking accuracy of our model
get_acc = his.history['accuracy']
value_acc = his.history['val_accuracy']
get_loss = his.history['loss']
validation_loss = his.history['val_loss']
epochs = range(len(get_acc))
plt.plot(epochs, get_acc, 'r', label='Accuracy of Training data')
plt.plot(epochs, value_acc, 'b', label='Accuracy of Validation data')
plt.title('Training vs validation accuracy')
plt.legend(loc=0)
plt.figure()
plt.show()
# Checking the loss of data
epochs = range(len(get_loss))
plt.plot(epochs, get_loss, 'r', label='Loss of Training data')
plt.plot(epochs, validation_loss, 'b', label='Loss of Validation data')
plt.title('Training vs validation loss')
plt.legend(loc=0)
plt.figure()
plt.show()
load_img("pose_recognition_data/dataset/adho mukha svanasana/95. downward-facing-dog-pose.png",target_size=(40,40))
image = load_img("pose_recognition_data/dataset/adho mukha svanasana/95. downward-facing-dog-pose.png",target_size=(40,40))
image=img_to_array(image)
image=image/255.0
prediction_image=np.array(image)
prediction_image= np.expand_dims(image, axis=0)
prediction=model.predict(prediction_image)
value=np.argmax(prediction)
move_name=mapper(value)
print("Prediction is {}.".format(move_name))
print(test.shape)
pred2=model.predict(test)
print(pred2.shape)
PRED=[]
for item in pred2:
value2=np.argmax(item)
PRED+=[value2]
ANS=testlabels0
accuracy=accuracy_score(ANS,PRED)
print(accuracy)
```
TVAE Model
===========
In this guide we will go through a series of steps that will let you
discover functionalities of the `TVAE` model, including how to:
- Create an instance of `TVAE`.
- Fit the instance to your data.
- Generate synthetic versions of your data.
- Use `TVAE` to anonymize PII information.
- Specify hyperparameters to improve the output quality.
What is TVAE?
--------------
The `sdv.tabular.TVAE` model is based on the VAE-based Deep Learning
data synthesizer which was presented at the NeurIPS 2020 conference by
the paper titled [Modeling Tabular data using Conditional
GAN](https://arxiv.org/abs/1907.00503).
Let\'s now discover how to learn a dataset and later on generate
synthetic data with the same format and statistical properties by using
the `TVAE` class from SDV.
Quick Usage
-----------
We will start by loading one of our demo datasets, the
`student_placements`, which contains information about MBA students that
applied for placements during the year 2020.
<div class="alert alert-warning">
**Warning**
In order to follow this guide you need to have `tvae` installed on your
system. If you have not done it yet, please install `tvae` now by
executing the command `pip install sdv` in a terminal.
</div>
```
from sdv.demo import load_tabular_demo
data = load_tabular_demo('student_placements')
data.head()
```
As you can see, this table contains information about students which
includes, among other things:
- Their id and gender
- Their grades and specializations
- Their work experience
- The salary that they were offered
- The duration and dates of their placement
You will notice that there is data with the following characteristics:
- There are float, integer, boolean, categorical and datetime values.
- There are some variables that have missing data. In particular, all
the data related to the placement details is missing in the rows
where the student was not placed.
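As a quick check (a sketch, not part of the original guide), these characteristics can be inspected directly with pandas:
```
# Sketch: inspect the column dtypes and count missing values per column
print(data.dtypes)
print(data.isnull().sum())
```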
Let us use `TVAE` to learn this data and then sample synthetic data
about new students to see how well the model captures the characteristics
indicated above. In order to do this you will need to:
- Import the `sdv.tabular.TVAE` class and create an instance of it.
- Call its `fit` method passing our table.
- Call its `sample` method indicating the number of synthetic rows
that you want to generate.
```
from sdv.tabular import TVAE
model = TVAE()
model.fit(data)
```
<div class="alert alert-info">
**Note**
Notice that the model `fitting` process took care of transforming the
different fields using the appropriate [Reversible Data
Transforms](http://github.com/sdv-dev/RDT) to ensure that the data has a
format that the underlying TVAESynthesizer class can handle.
</div>
### Generate synthetic data from the model
Once the modeling has finished you are ready to generate new synthetic
data by calling the `sample` method from your model passing the number
of rows that we want to generate.
```
new_data = model.sample(num_rows=200)
```
This will return a table identical to the one which the model was fitted
on, but filled with new data which resembles the original one.
```
new_data.head()
```
<div class="alert alert-info">
**Note**
There are a number of other parameters in this method that you can use to
optimize the process of generating synthetic data. Use ``output_file_path``
to directly write results to a CSV file, ``batch_size`` to break up sampling
into smaller pieces & track their progress and ``randomize_samples`` to
determine whether to generate the same synthetic data every time.
See the <a href=https://sdv.dev/SDV/api_reference/tabular/api/sdv.tabular.ctgan.TVAE.sample>API Section</a>
for more details.
</div>
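A sketch of how these options might be combined is shown below; the parameter names are taken from the note above, and the exact behavior may vary between SDV versions:
```
# Sketch: sampling with the optional parameters described in the note above
new_data = model.sample(
    num_rows=200,
    batch_size=50,                     # sample in smaller batches and track progress
    randomize_samples=False,           # generate the same synthetic data every time
    output_file_path='synthetic.csv',  # also write the results to a CSV file
)
```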
### Save and Load the model
In many scenarios it will be convenient to generate synthetic versions
of your data directly in systems that do not have access to the original
data source. For example, you may want to generate testing data on
the fly inside a testing environment that does not have access to your
production database. In these scenarios, fitting the model with real
data every time that you need to generate new data is not feasible, so you
will need to fit a model in your production environment, save the fitted
model into a file, send this file to the testing environment and then
load it there to be able to `sample` from it.
Let\'s see how this process works.
#### Save and share the model
Once you have fitted the model, all you need to do is call its `save`
method passing the name of the file in which you want to save the model.
Note that the extension of the filename is not relevant, but we will be
using the `.pkl` extension to highlight that the serialization protocol
used is [pickle](https://docs.python.org/3/library/pickle.html).
```
model.save('my_model.pkl')
```
This will have created a file called `my_model.pkl` in the same
directory in which you are running SDV.
<div class="alert alert-info">
**Important**
If you inspect the generated file you will notice that its size is much
smaller than the size of the data that you used to generate it. This is
because the serialized model contains **no information about the
original data**, other than the parameters it needs to generate
synthetic versions of it. This means that you can safely share this
`my_model.pkl` file without the risk of disclosing any of your real
data!
</div>
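As a quick sanity check (a sketch, not part of the original guide), the size of the serialized model can be reported with the standard library:
```
# Sketch: report the size of the serialized model on disk
import os
print('{:.1f} KB'.format(os.path.getsize('my_model.pkl') / 1024))
```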
#### Load the model and generate new data
The file you just generated can be sent over to the system where the
synthetic data will be generated. Once it is there, you can load it
using the `TVAE.load` method, and then you are ready to sample new data
from the loaded instance:
```
loaded = TVAE.load('my_model.pkl')
new_data = loaded.sample(num_rows=200)
```
<div class="alert alert-warning">
**Warning**
Notice that the system where the model is loaded needs to also have
`sdv` and `tvae` installed, otherwise it will not be able to load the
model and use it.
</div>
### Specifying the Primary Key of the table
One of the first things that you may have noticed when looking at the demo
data is that there is a `student_id` column which acts as the primary
key of the table, and which is supposed to have unique values. Indeed,
if we look at the number of times that each value appears, we see that
all of them appear at most once:
```
data.student_id.value_counts().max()
```
However, if we look at the synthetic data that we generated, we observe
that there are some values that appear more than once:
```
new_data[new_data.student_id == new_data.student_id.value_counts().index[0]]
```
This happens because the model was not notified at any point about the
fact that the `student_id` had to be unique, so when it generates new
data it will provoke collisions sooner or later. In order to solve this,
we can pass the argument `primary_key` to our model when we create it,
indicating the name of the column that is the index of the table.
```
model = TVAE(
primary_key='student_id'
)
model.fit(data)
new_data = model.sample(200)
new_data.head()
```
As a result, the model will learn that this column must be unique and
generate a unique sequence of values for the column:
```
new_data.student_id.value_counts().max()
```
### Anonymizing Personally Identifiable Information (PII)
There will be many cases where the data will contain Personally
Identifiable Information which we cannot disclose. In these cases, we
will want our Tabular Models to replace the information within these
fields with fake, simulated data that looks similar to the real one but
does not contain any of the original values.
Let\'s load a new dataset that contains a PII field, the
`student_placements_pii` demo, and try to generate synthetic versions of
it that do not contain any of the PII fields.
<div class="alert alert-info">
**Note**
The `student_placements_pii` dataset is a modified version of the
`student_placements` dataset with one new field, `address`, which
contains PII information about the students. Notice that this additional
`address` field has been simulated and does not correspond to data from
the real users.
</div>
```
data_pii = load_tabular_demo('student_placements_pii')
data_pii.head()
```
If we use our tabular model on this new data we will see how the
synthetic data that it generates discloses the addresses from the real
students:
```
model = TVAE(
primary_key='student_id',
)
model.fit(data_pii)
new_data_pii = model.sample(200)
new_data_pii.head()
```
More specifically, we can see how all the addresses that have been
generated actually come from the original dataset:
```
new_data_pii.address.isin(data_pii.address).sum()
```
In order to solve this, we can pass an additional argument
`anonymize_fields` to our model when we create the instance. This
`anonymize_fields` argument will need to be a dictionary that contains:
- The name of the field that we want to anonymize.
- The category of the field that we want to use when we generate fake
values for it.
The complete list of possible categories can be seen in the [Faker
Providers](https://faker.readthedocs.io/en/master/providers.html) page,
and it contains a huge list of concepts such as:
- name
- address
- country
- city
- ssn
- credit_card_number
- credit_card_expire
- credit_card_security_code
- email
- telephone
- \...
In this case, since the field is an address, we will pass a
dictionary indicating the category `address`
```
model = TVAE(
primary_key='student_id',
anonymize_fields={
'address': 'address'
}
)
model.fit(data_pii)
```
As a result, we can see how the real `address` values have been replaced
by other fake addresses:
```
new_data_pii = model.sample(200)
new_data_pii.head()
```
Which means that none of the original addresses can be found in the
sampled data:
```
data_pii.address.isin(new_data_pii.address).sum()
```
As we can see, in this case these modifications changed the obtained
results slightly, but they did not introduce dramatic changes in
performance.
### Conditional Sampling
As the name implies, conditional sampling allows us to sample from a conditional distribution using the `TVAE` model, which means we can generate only values that satisfy certain conditions. These conditional values can be passed to the `sample_conditions` method as a list of `sdv.sampling.Condition` objects or to the `sample_remaining_columns` method as a dataframe.
When specifying a `sdv.sampling.Condition` object, we can pass in the desired conditions as a dictionary, as well as specify the number of desired rows for that condition.
```
from sdv.sampling import Condition
condition = Condition({
'gender': 'M'
}, num_rows=5)
model.sample_conditions(conditions=[condition])
```
It's also possible to condition on multiple columns, such as `gender = M` and `experience_years = 0`.
```
condition = Condition({
'gender': 'M',
'experience_years': 0
}, num_rows=5)
model.sample_conditions(conditions=[condition])
```
In the `sample_remaining_columns` method, `conditions` is passed as a dataframe. In that case, the model will generate one sample for each row of the dataframe, sorted in the same order. Since the model already knows how many samples to generate, passing it as a parameter is unnecessary. For example, if we want to generate three samples where `gender = M` and three samples with `gender = F`, we can do the following:
```
import pandas as pd
conditions = pd.DataFrame({
'gender': ['M', 'M', 'M', 'F', 'F', 'F'],
})
model.sample_remaining_columns(conditions)
```
`TVAE` also supports conditioning on continuous values, as long as the values are within the range seen in the training data. For example, if all the values of a column lie between 0 and 1, `TVAE` will not be able to set this value to 1000.
```
condition = Condition({
'degree_perc': 70.0
}, num_rows=5)
model.sample_conditions(conditions=[condition])
```
<div class="alert alert-info">
**Note**
Currently, conditional sampling works through a rejection sampling process, where
rows are sampled repeatedly until one that satisfies the conditions is found.
In case you are not able to sample enough valid rows, update the related parameters:
increase ``max_tries`` or increase ``batch_size_per_try``.
More information about these parameters can be found in the
<a href=https://sdv.dev/SDV/api_reference/tabular/api/sdv.tabular.ctgan.TVAE.sample_conditions.html> API section</a>.
If you have many conditions that cannot easily be satisfied, consider switching
to the <a href=https://sdv.dev/SDV/user_guides/single_table/gaussian_copula.html>GaussianCopula model</a>, which is able to handle conditional
sampling more efficiently.
</div>
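As a sketch (the parameter names are taken from the note above, and their availability may depend on your SDV version), the rejection sampling budget could be increased like this:
```
# Sketch: give rejection sampling a larger budget for hard-to-satisfy conditions
model.sample_conditions(
    conditions=[condition],
    max_tries=500,            # try more batches before giving up
    batch_size_per_try=1000,  # sample more rows per attempt
)
```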
### How do I specify constraints?
If you look closely at the data you may notice that some properties were
not completely captured by the model. For example, you may have seen
that sometimes the model produces an `experience_years` number greater
than `0` while also indicating that `work_experience` is `False`. These
types of properties are what we call `Constraints` and can also be
handled using `SDV`. For further details about them please visit the
[Handling Constraints](04_Handling_Constraints.ipynb) guide.
### Can I evaluate the Synthetic Data?
A very common question when someone starts using **SDV** to generate
synthetic data is: *\"How good is the data that I just generated?\"*
In order to answer this question, **SDV** has a collection of metrics
and tools that allow you to compare the *real* that you provided and the
*synthetic* data that you generated using **SDV** or any other tool.
You can read more about this in the [Evaluating Synthetic Data Generators](
05_Evaluating_Synthetic_Data_Generators.ipynb) guide.
```
!pip install kornia
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
import os
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
from scipy import io
import torch.utils.data
import scipy
from scipy.stats import entropy
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import math
from sklearn.metrics import mean_squared_error
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
!pip install -U spectral
!pip install pytorch_ssim
from pytorch_ssim import ssim
if not (os.path.isfile('/content/Salinas_corrected.mat')):
!wget https://github.com/gokriznastic/HybridSN/raw/master/data/Salinas_corrected.mat
if not (os.path.isfile('/content/Salinas_gt.mat')):
!wget https://github.com/gokriznastic/HybridSN/raw/master/data/Salinas_gt.mat
from torch.nn import Module, Sequential, Conv2d, ReLU,AdaptiveMaxPool2d, AdaptiveAvgPool2d, \
NLLLoss, BCELoss, CrossEntropyLoss, AvgPool2d, MaxPool2d, Parameter, Linear, Sigmoid, Softmax, Dropout, Embedding
from torch.nn import functional as F
import scipy.io as sio
def loadData():
data = sio.loadmat('Salinas_corrected.mat')['salinas_corrected']
labels = sio.loadmat('Salinas_gt.mat')['salinas_gt']
return data, labels
def padWithZeros(X, margin=2):
## From: https://github.com/gokriznastic/HybridSN/blob/master/Hybrid-Spectral-Net.ipynb
newX = np.zeros((X.shape[0] + 2 * margin, X.shape[1] + 2* margin, X.shape[2]))
x_offset = margin
y_offset = margin
newX[x_offset:X.shape[0] + x_offset, y_offset:X.shape[1] + y_offset, :] = X
return newX
def createImageCubes(X, y, windowSize=5, removeZeroLabels = True):
## From: https://github.com/gokriznastic/HybridSN/blob/master/Hybrid-Spectral-Net.ipynb
margin = int((windowSize - 1) / 2)
zeroPaddedX = padWithZeros(X, margin=margin)
# split patches
patchesData = np.zeros((X.shape[0] * X.shape[1], windowSize, windowSize, X.shape[2]), dtype=np.uint8)
patchesLabels = np.zeros((X.shape[0] * X.shape[1]), dtype=np.uint8)
patchIndex = 0
for r in range(margin, zeroPaddedX.shape[0] - margin):
for c in range(margin, zeroPaddedX.shape[1] - margin):
patch = zeroPaddedX[r - margin:r + margin + 1, c - margin:c + margin + 1]
patchesData[patchIndex, :, :, :] = patch
patchesLabels[patchIndex] = y[r-margin, c-margin]
patchIndex = patchIndex + 1
if removeZeroLabels:
patchesData = patchesData[patchesLabels>0,:,:,:]
patchesLabels = patchesLabels[patchesLabels>0]
patchesLabels -= 1
return patchesData, patchesLabels
class HyperSpectralDataset(Dataset):
"""HyperSpectral dataset."""
def __init__(self,data_url,label_url):
self.data = np.array(scipy.io.loadmat('/content/'+data_url.split('/')[-1])['salinas_corrected'])
self.targets = np.array(scipy.io.loadmat('/content/'+label_url.split('/')[-1])['salinas_gt'])
self.data, self.targets = createImageCubes(self.data,self.targets, windowSize=5)
self.data = torch.Tensor(self.data)
self.data = self.data.permute(0,3,1,2)
print(self.data.shape)
def __len__(self):
return self.data.shape[0]
def __getitem__(self, idx):
return self.data[idx,:,:,:] , self.targets[idx]
data_train = HyperSpectralDataset('Salinas_corrected.mat','Salinas_gt.mat')
train_loader = DataLoader(data_train, batch_size=16, shuffle=True)
print(data_train.__getitem__(0)[0].shape)
print(data_train.__len__())
class PAM_Module(Module):
""" Position attention module https://github.com/junfu1115/DANet/blob/master/encoding/nn/attention.py"""
#Ref from SAGAN
def __init__(self, in_dim):
super(PAM_Module, self).__init__()
self.chanel_in = in_dim
self.query_conv = Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
self.key_conv = Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
self.value_conv = Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
self.gamma = Parameter(torch.zeros(1))
self.softmax = Softmax(dim=-1)
def forward(self, x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value + input feature
attention: B X (HxW) X (HxW)
"""
m_batchsize, C, height, width = x.size()
proj_query = self.query_conv(x).view(m_batchsize, -1, width*height).permute(0, 2, 1)
proj_key = self.key_conv(x).view(m_batchsize, -1, width*height)
energy = torch.bmm(proj_query, proj_key)
attention = self.softmax(energy)
proj_value = self.value_conv(x).view(m_batchsize, -1, width*height)
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, height, width)
out = self.gamma*out + x
#out = F.avg_pool2d(out, out.size()[2:4])
return out
class CAM_Module(Module):
""" Channel attention module https://github.com/junfu1115/DANet/blob/master/encoding/nn/attention.py"""
def __init__(self):
super(CAM_Module, self).__init__()
#self.chanel_in = in_dim
self.gamma = Parameter(torch.zeros(1))
self.softmax = Softmax(dim=-1)
def forward(self,x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value + input feature
attention: B X C X C
"""
m_batchsize, C, height, width = x.size()
proj_query = x.view(m_batchsize, C, -1)
proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy)-energy
attention = self.softmax(energy_new)
proj_value = x.view(m_batchsize, C, -1)
out = torch.bmm(attention, proj_value)
out = out.view(m_batchsize, C, height, width)
out = self.gamma*out + x
#out = F.avg_pool2d(out, out.size()[2:4])
return out
class RecNet(nn.Module):
def __init__(self):
super(RecNet, self).__init__()
self.conv3d_1 = nn.Sequential(nn.Conv3d(1, 128, (1, 3, 3), 1),
nn.BatchNorm3d(128),
nn.PReLU())
self.conv3d_2 = nn.Sequential(nn.Conv3d(128, 64, (1, 3, 3), 1),
nn.BatchNorm3d(64),
nn.PReLU())
self.pool3d = nn.MaxPool3d((1, 1, 1), (1, 1, 1))
self.deconv3d_1 = nn.Sequential(nn.ConvTranspose3d(64, 128, (1, 3, 3), 1),
nn.BatchNorm3d(128),
nn.PReLU())
self.deconv3d_2 = nn.Sequential(nn.ConvTranspose3d(128, 1, (1, 3, 3), 1),
nn.BatchNorm3d(1))
def forward(self, x):
x = self.conv3d_1(x)
x = self.conv3d_2(x)
x = self.pool3d(x)
x = self.deconv3d_1(x)
x = self.deconv3d_2(x)
return x.squeeze(1)
class DANet(Module):
def __init__(self):
super(DANet,self).__init__()
self.PAM_Module = PAM_Module(204)
self.CAM_Module = CAM_Module()
self.RecNet = RecNet()
def forward(self,x):
P = self.PAM_Module(x)
C = self.CAM_Module(x)
#B,Ch,H,W = P.size()
J = P + C
J = J.unsqueeze(1)
ret = self.RecNet(J)
return ret
danet_model = DANet().to(device)
from torchsummary import summary
summary(danet_model,input_size=(204,5,5))
!nvidia-smi
#model = BSNET_Conv().to(device)
optimizer = optim.SGD(danet_model.parameters(), lr=0.005, momentum=0.9)
top = 20
import skimage
import kornia
global bsnlist
ssim = kornia.losses.SSIM(5, reduction='none')
psnr = kornia.losses.PSNRLoss(2500)
from skimage import measure
ssim_list = []
psnr_list = []
l1_list = []
channel_weight_list = []
def train(epoch):
danet_model.train()
ENTROPY = torch.zeros(204)
for batch_idx, (data, __) in enumerate(train_loader):
data = data.to(device)
optimizer.zero_grad()
output = danet_model(data)
loss = F.l1_loss(output,data)
loss.backward()
optimizer.step()
D = output.detach().cpu().numpy()
for i in range(0,204):
ENTROPY[i]+=skimage.measure.shannon_entropy(D[:,i,:,:])
if batch_idx % (0.5*len(train_loader)) == 0:
L1 = loss.item()
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader),L1))
l1_list.append(L1)
ssim_val = torch.mean(ssim(data,output))
print("SSIM: {}".format(ssim_val))
ssim_list.append(ssim_val)
psnr_val = psnr(data,output)
print("PSNR: {}".format(psnr_val))
psnr_list.append(psnr_val)
ENTROPY = np.array(ENTROPY)
bsnlist = np.asarray(ENTROPY.argsort()[-top:][::-1])
print('Top {} bands with Entropy ->'.format(top),list(bsnlist))
for epoch in range(0, 10):
train(epoch)
x,xx,xxx = psnr_list,ssim_list,l1_list
print(len(x)),print(len(xx)),print(len(xxx))
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
np.save('psnr_SV.npy',np.asarray(x))
np.save('ssim_SV.npy',np.asarray(xx))
np.save('l1_SV.npy',np.asarray(xxx))
plt.figure(figsize=(20,10))
plt.xlabel('Epoch',fontsize=50)
plt.ylabel('PSNR',fontsize=50)
plt.xticks(fontsize=40)
plt.yticks(np.arange(0,100 , 10.0),fontsize=40)
plt.ylim(10,100)
plt.plot(x,linewidth=5.0)
plt.savefig('PSNR-SV.pdf')
plt.show()
plt.figure(figsize=(20,10))
plt.xlabel('Epoch',fontsize=50)
plt.ylabel('SSIM',fontsize=50)
plt.xticks(fontsize=40)
plt.yticks(fontsize=40)
plt.ylim(0,0.6)
plt.plot(xx,linewidth=5.0)
plt.savefig('SSIM-SV.pdf')
plt.show()
plt.figure(figsize=(20,10))
plt.xlabel('Epoch',fontsize=50)
plt.ylabel('L1 Reconstruction loss',fontsize=50)
plt.xticks(fontsize=40)
plt.yticks(fontsize=40)
plt.ylim(0,160)
plt.plot(xxx,linewidth=5.0)
plt.savefig('L1-SV.pdf')
plt.show()
from google.colab import files
files.download('SSIM-SV.pdf')
files.download('PSNR-SV.pdf')
files.download('L1-SV.pdf')
!wget https://raw.githubusercontent.com/ucalyptus/Double-Branch-Dual-Attention-Mechanism-Network/master/SV.csv
dabsrecnet = [24, 42, 63, 77, 57, 49, 35, 68, 64, 69, 50, 44, 43, 15, 90, 37, 48, 72, 54, 79]
bsnetconv = [116,153,19,189,97,179,171,141,95,144,142,46,104,203,91,18,176,108,150,194]
pca = [169,67,168,63,68,78,167,166,165,69,164,163,77,162,70,62,160,161,76,158]
spabs = [0,79,166,80,203,78,77,76,55,81,97,5,23,75,2,82,56,74,143,85]
snmf = [24,1,105,196,203,0,39,116,38,60,89,104,198,147,158,3,146,4,93,88]
issc = [141,182,106,147,107,146,108,202,203,109,145,148,112,201,110,113,144,149,105,154]
def MeanSpectralDivergence(band_subset):
n_row, n_column, n_band = band_subset.shape
N = n_row * n_column
hist = []
for i in range(n_band):
hist_, _ = np.histogram(band_subset[:, :, i], 256)
hist.append(hist_ / N)
hist = np.asarray(hist)
hist[np.nonzero(hist <= 0)] = 1e-20
# entropy_lst = entropy(hist.transpose())
info_div = 0
# band_subset[np.nonzero(band_subset <= 0)] = 1e-20
for b_i in range(n_band):
for b_j in range(n_band):
band_i = hist[b_i].reshape(-1)/np.sum(hist[b_i])
band_j = hist[b_j].reshape(-1)/np.sum(hist[b_j])
entr_ij = entropy(band_i, band_j)
entr_ji = entropy(band_j, band_i)
entr_sum = entr_ij + entr_ji
info_div += entr_sum
msd = info_div * 2 / (n_band * (n_band - 1))
return msd
def MeanSpectralAngle(band_subset):
"""
Spectral Angle (SA) is defined as the angle between two bands.
We use Mean SA (MSA) to quantify the redundancy among a band set.
i-th band B_i, and j-th band B_j,
SA = arccos [B_i^T * B_j / ||B_i|| * ||B_j||]
MSA = 2/n*(n-1) * sum(SA_ij)
Ref:
[1] GONG MAOGUO, ZHANG MINGYANG, YUAN YUAN. Unsupervised Band Selection Based on Evolutionary Multiobjective
Optimization for Hyperspectral Images [J]. IEEE Transactions on Geoscience and Remote Sensing, 2016, 54(1): 544-57.
:param band_subset: with shape (n_row, n_clm, n_band)
:return:
"""
n_row, n_column, n_band = band_subset.shape
spectral_angle = 0
for i in range(n_band):
for j in range(n_band):
            band_i = band_subset[:, :, i].reshape(-1)  # bands are on the last axis, per the docstring
            band_j = band_subset[:, :, j].reshape(-1)
lower = np.sum(band_i ** 2) ** 0.5 * np.sum(band_j ** 2) ** 0.5
higher = np.dot(band_i, band_j)
if higher / lower > 1.:
angle_ij = np.arccos(1. - 1e-16)
# print('1-higher-lower', higher - lower)
# elif higher / lower < -1.:
# angle_ij = np.arccos(1e-8 - 1.)
# print('2-higher-lower', higher - lower)
else:
angle_ij = np.arccos(higher / lower)
spectral_angle += angle_ij
msa = spectral_angle * 2 / (n_band * (n_band - 1))
return msa
def MSA(bsnlist):
X, _ = loadData()
print('[',end=" ")
for a in range(2,len(bsnlist)):
band_subset_list = []
for i in bsnlist[:a]:
band_subset_list.append(X[:,:,i])
band_subset = np.array(band_subset_list)
band_subset = np.stack(band_subset,axis =2)
print(MeanSpectralAngle(band_subset),end=" ")
if a!= len(bsnlist)-1:
print(",",end=" ")
print(']')
MSA(dabsrecnet)
MSA(bsnetconv)
MSA(pca)
MSA(spabs)
MSA(snmf)
MSA(issc)
def MSD(bsnlist):
X, _ = loadData()
print('[',end=" ")
for a in range(2,len(bsnlist)):
band_subset_list = []
for i in bsnlist[:a]:
band_subset_list.append(X[:,:,i])
band_subset = np.array(band_subset_list)
band_subset = np.stack(band_subset,axis =2)
print(MeanSpectralDivergence(band_subset),end=" ")
if a!= len(bsnlist)-1:
print(",",end=" ")
print(']')
MSD(dabsrecnet)
MSD(bsnetconv)
MSD(pca)
MSD(spabs)
MSD(snmf)
MSD(issc)
import skimage
from skimage import measure
def sumentr(band_subset,X):
nbands = len(band_subset)
    ENTROPY = np.zeros(nbands)  # start from zero so the result is the plain sum of band entropies
for i in range(0,len(band_subset)):
ENTROPY[i]+=skimage.measure.shannon_entropy(X[:,:,band_subset[i]])
return np.sum(ENTROPY)
def EntropySum(bsnlist):
X, _ = loadData()
print('[',end=" ")
for a in range(2,len(bsnlist)):
band_subset_list = []
for i in bsnlist[:a]:
band_subset_list.append(X[:,:,i])
band_subset = np.array(band_subset_list)
band_subset = np.stack(band_subset,axis =2)
print(sumentr(bsnlist[:a],X),end=" ")
if a!= len(bsnlist)-1:
print(",",end=" ")
print(']')
EntropySum(dabsrecnet)
EntropySum(bsnetconv)
EntropySum(pca)
EntropySum(spabs)
EntropySum(snmf)
EntropySum(issc)
if not (os.path.isfile('/content/SV.csv')):
!wget https://raw.githubusercontent.com/ucalyptus/Double-Branch-Dual-Attention-Mechanism-Network/master/SV.csv
import pandas as pd
import re
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv("/content/SV.csv")
import matplotlib.pyplot as plt
X, _ = loadData()
n_row,n_column,n_band= X.shape
N = n_row * n_column
hist = []
Entropy = []
for i in range(n_band):
hist_, _ = np.histogram(X[:, :, i], 256)
hist.append(hist_ / N)
band_i = hist[i].reshape(-1)/np.sum(hist[i])
entr_i = entropy(band_i)
Entropy.append(entr_i)
for i in range(0,len(df['Selected Bands'])):
df['Selected Bands'][i] = re.findall('[0-9]+', df['Selected Bands'][i])
df['Selected Bands'][i] = [int(k) for k in df['Selected Bands'][i]]
meth = ["BS-Net-Conv","SpaBS","PCA","SNMF","DARecNet-BS"]
cols = ['b','y','g','r','m']
fig1,(ax1,ax2) = plt.subplots(2,sharex='col',figsize=(37,20))
ax1.grid(True)
ax1.yaxis.grid(False)
ax1.set_xticks([0,7,15,30,45,60,75,90,105,120,135,150,165,180,195,205])
ax1.yaxis.set_tick_params(labelsize=55)
plt.ylabel(meth)
scatar = []
for i in range(0,len(meth)):
ax1.hlines(y = meth[i],xmin=min(df['Selected Bands'][i]),xmax=max(df['Selected Bands'][i]),colors=cols[i],linewidth=7)
    SCATTER = ax1.scatter(x=df['Selected Bands'][i],y = [i]*20,edgecolors=cols[i],linewidths=14)
scatar.append(SCATTER)
ax2.grid(True)
ax2.yaxis.grid(False)
ax2.set_yticks([1,2,3,4,5])
ax2.set_ylabel("Value of Entropy",fontsize=55)
ax2.set_xlabel("Spectral Band",fontsize=55)
ax2.xaxis.set_tick_params(labelsize=55)
ax2.yaxis.set_tick_params(labelsize=55)
ax2.plot(Entropy,linewidth=7)
plt.savefig('Entropy_SV.pdf')
```
| github_jupyter |
## _*H2 ground state energy computation using Iterative QPE*_
This notebook demonstrates using Qiskit Chemistry to plot graphs of the ground state energy of the Hydrogen (H2) molecule over a range of inter-atomic distances using the IQPE (Iterative Quantum Phase Estimation) algorithm. The results are compared to the same energies as computed by the ExactEigensolver.
This notebook populates a dictionary that is a programmatic representation of an input file in order to drive the qiskit_chemistry stack. Such a dictionary can be manipulated programmatically, and this is indeed the case here, where we alter the molecule supplied to the driver in each loop.
This notebook has been written to use the PYSCF chemistry driver. See the PYSCF chemistry driver readme if you need to install the external PySCF library that this driver requires.
```
import numpy as np
import pylab
from qiskit import LegacySimulators
from qiskit_chemistry import QiskitChemistry
import time
# Input dictionary to configure Qiskit Chemistry for the chemistry problem.
qiskit_chemistry_dict = {
'driver': {'name': 'PYSCF'},
'PYSCF': {'atom': '', 'basis': 'sto3g'},
'operator': {'name': 'hamiltonian', 'transformation': 'full', 'qubit_mapping': 'parity'},
'algorithm': {'name': ''},
'initial_state': {'name': 'HartreeFock'},
}
molecule = 'H .0 .0 -{0}; H .0 .0 {0}'
algorithms = [
{
'name': 'IQPE',
'num_iterations': 16,
'num_time_slices': 3000,
'expansion_mode': 'trotter',
'expansion_order': 1,
},
{
'name': 'ExactEigensolver'
}
]
backends = [
LegacySimulators.get_backend('qasm_simulator'),
None
]
start = 0.5 # Start distance
by = 0.5 # How much to increase distance by
steps = 20 # Number of steps to increase by
energies = np.empty([len(algorithms), steps+1])
hf_energies = np.empty(steps+1)
distances = np.empty(steps+1)
import concurrent.futures
import multiprocessing as mp
import copy
def subroutine(i, j, qiskit_chemistry_dict, d, backend, algorithm):
    solver = QiskitChemistry()
    qiskit_chemistry_dict['PYSCF']['atom'] = molecule.format(d/2)
    qiskit_chemistry_dict['algorithm'] = algorithm
    result = solver.run(qiskit_chemistry_dict, backend=backend)
    return i, j, d, result['energy'], result['hf_energy']
start_time = time.time()
max_workers = max(4, mp.cpu_count())
with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
futures = []
for j in range(len(algorithms)):
algorithm = algorithms[j]
backend = backends[j]
for i in range(steps+1):
d = start + i*by/steps
            future = executor.submit(
                subroutine,
                i,
                j,
                copy.deepcopy(qiskit_chemistry_dict),
                d,
                backend,
                algorithm
            )
            futures.append(future)
    for future in concurrent.futures.as_completed(futures):
        # the algorithm index j is returned with the result so that futures
        # completing in arbitrary order are written to the correct row
        i, j, d, energy, hf_energy = future.result()
        energies[j][i] = energy
        hf_energies[i] = hf_energy
        distances[i] = d
print(' --- complete')
print('Distances: ', distances)
print('Energies:', energies)
print('Hartree-Fock energies:', hf_energies)
print("--- %s seconds ---" % (time.time() - start_time))
pylab.plot(distances, hf_energies, label='Hartree-Fock')
for j in range(len(algorithms)):
pylab.plot(distances, energies[j], label=algorithms[j]['name'])
pylab.xlabel('Interatomic distance')
pylab.ylabel('Energy')
pylab.title('H2 Ground State Energy')
pylab.legend(loc='upper right')
pylab.show()
pylab.plot(distances, np.subtract(hf_energies, energies[1]), label='Hartree-Fock')
pylab.plot(distances, np.subtract(energies[0], energies[1]), label='IQPE')
pylab.xlabel('Interatomic distance')
pylab.ylabel('Energy')
pylab.title('Energy difference from ExactEigensolver')
pylab.legend(loc='upper right')
pylab.show()
```
| github_jupyter |
# ML Pipeline Preparation
Follow the instructions below to help you create your ML pipeline.
### 1. Import libraries and load data from database.
- Import Python libraries
- Load dataset from database with [`read_sql_table`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_table.html)
- Define feature and target variables X and Y
```
# import necessary libraries
import pandas as pd
import numpy as np
import os
import pickle
import nltk
import re
from sqlalchemy import create_engine
import sqlite3
from nltk.tokenize import word_tokenize, RegexpTokenizer
from nltk.stem import WordNetLemmatizer
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier,AdaBoostClassifier
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer, accuracy_score, f1_score, fbeta_score, classification_report
from sklearn.metrics import precision_recall_fscore_support
from scipy.stats import hmean
from scipy.stats.mstats import gmean
from nltk.corpus import stopwords
nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger', 'stopwords'])
import matplotlib.pyplot as plt
%matplotlib inline
# load data from database
engine = create_engine('sqlite:///InsertDatabaseName.db')
df = pd.read_sql("SELECT * FROM InsertTableName", engine)
df.head()
# View counts of the unique 'genre' attribute values
genre_types = df.genre.value_counts()
genre_types
# check for attributes with missing values/elements
df.isnull().mean().head()
# preview dropping rows with missing values (note: dropna() returns a copy and does not modify df in place)
df.dropna()
df.head()
# load data from database with 'X' as attributes for message column
X = df["message"]
# load data from database with 'Y' attributes for the last 36 columns
Y = df.drop(['id', 'message', 'original', 'genre'], axis = 1)
```
### 2. Write a tokenization function to process your text data
```
# Proprocess text by removing unwanted properties
def tokenize(text):
'''
input:
text: input text data containing attributes
output:
clean_tokens: cleaned text without unwanted texts
'''
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
detected_urls = re.findall(url_regex, text)
for url in detected_urls:
text = text.replace(url, "urlplaceholder")
# take out all punctuation while tokenizing
tokenizer = RegexpTokenizer(r'\w+')
tokens = tokenizer.tokenize(text)
# lemmatize as shown in the lesson
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
```
### 3. Build a machine learning pipeline
This machine pipeline should take in the `message` column as input and output classification results on the other 36 categories in the dataset. You may find the [MultiOutputClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html) helpful for predicting multiple target variables.
```
pipeline = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(RandomForestClassifier())),
])
# Visualize model parameters
pipeline.get_params()
```
### 4. Train pipeline
- Split data into train and test sets
- Train pipeline
```
# use sklearn split function to split dataset into train and 20% test sets
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2)
# Train pipeline using RandomForest Classifier algorithm
pipeline.fit(X_train, y_train)
```
### 5. Test your model
Report the f1 score, precision and recall for each output category of the dataset. You can do this by iterating through the columns and calling sklearn's classification_report on each.
```
# Output result metrics of trained RandomForest Classifier algorithm
def evaluate_model(model, X_test, y_test):
'''
Input:
model: RandomForest Classifier trained model
X_test: Test training features
Y_test: Test training response variable
Output:
None:
Display model precision, recall, f1-score, support
'''
y_pred = model.predict(X_test)
for item, col in enumerate(y_test):
print(col)
print(classification_report(y_test[col], y_pred[:, item]))
# classification_report to display model precision, recall, f1-score, support
evaluate_model(pipeline, X_test, y_test)
```
### 6. Improve your model
Use grid search to find better parameters.
```
parameters = {'clf__estimator__max_depth': [10, 50, None],
'clf__estimator__min_samples_leaf':[2, 5, 10]}
cv = GridSearchCV(pipeline, parameters)
```
### 7. Test your model
Show the accuracy, precision, and recall of the tuned model.
Since this project focuses on code quality, process, and pipelines, there is no minimum performance metric needed to pass. However, make sure to fine tune your models for accuracy, precision and recall to make your project stand out - especially for your portfolio!
```
# Train pipeline using the improved model
cv.fit(X_train, y_train)
# # classification_report to display model precision, recall, f1-score, support
evaluate_model(cv, X_test, y_test)
cv.best_estimator_
```
### 8. Try improving your model further. Here are a few ideas:
* try other machine learning algorithms
* add other features besides the TF-IDF (one way to do this is sketched below)
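For the second idea, a small custom transformer can be combined with the TF-IDF features through `FeatureUnion`. The transformer below and the message-length feature are only illustrative choices, and `tokenize` is the function defined in step 2:

```
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier

class TextLengthExtractor(BaseEstimator, TransformerMixin):
    """Adds the raw character length of each message as an extra feature."""
    def fit(self, X, y=None):
        return self
    def transform(self, X):
        return pd.Series(X).str.len().values.reshape(-1, 1)

feature_pipeline = Pipeline([
    ('features', FeatureUnion([
        ('text', Pipeline([
            ('vect', CountVectorizer(tokenizer=tokenize)),
            ('tfidf', TfidfTransformer()),
        ])),
        ('length', TextLengthExtractor()),
    ])),
    ('clf', MultiOutputClassifier(RandomForestClassifier())),
])
# feature_pipeline.fit(X_train, y_train) would then be trained and evaluated exactly as above
```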
```
# Improve model using DecisionTree Classifier
new_pipeline = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(DecisionTreeClassifier()))
])
# Train improved model
new_pipeline.fit(X_train, y_train)
# Run result metric score display function
evaluate_model(new_pipeline, X_test, y_test)
```
### 9. Export your model as a pickle file
```
# save a copy of the trained model to disk
trained_model_file = 'trained_model.sav'
pickle.dump(cv, open(trained_model_file, 'wb'))
```
### 10. Use this notebook to complete `train.py`
Use the template file attached in the Resources folder to write a script that runs the steps above to create a database and export a model based on a new dataset specified by the user.
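A minimal sketch of the model-training part of such a script is shown below. The argument names, file paths, table name, and simplified pipeline are assumptions; the actual template in the Resources folder may differ, and the real script would reuse the custom `tokenize` function.

```
# train.py (sketch) -- argument names, paths, and the table name are assumptions
import sys
import pickle
import pandas as pd
from sqlalchemy import create_engine
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier


def main(database_filepath, model_filepath):
    # load the cleaned data written by the ETL step
    engine = create_engine('sqlite:///{}'.format(database_filepath))
    df = pd.read_sql("SELECT * FROM InsertTableName", engine)
    X = df["message"]
    Y = df.drop(['id', 'message', 'original', 'genre'], axis=1)

    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)

    # same pipeline structure as in the cells above
    model = Pipeline([
        ('vect', CountVectorizer()),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(RandomForestClassifier())),
    ])
    model.fit(X_train, y_train)

    with open(model_filepath, 'wb') as f:
        pickle.dump(model, f)


if __name__ == '__main__':
    main(sys.argv[1], sys.argv[2])
```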
| github_jupyter |
```
#using tensorflow kernel
import tensorflow as tf
print(tf.__version__)
!pip list | grep waymo
!pip list | grep torch
!nvidia-smi
import tensorflow.compat.v1 as tf
import math
import numpy as np
import itertools
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from waymo_open_dataset.utils import range_image_utils
from waymo_open_dataset.utils import transform_utils
from waymo_open_dataset.utils import frame_utils
from waymo_open_dataset import dataset_pb2 as open_dataset
#tf.enable_eager_execution()
import os
import argparse
from pathlib import Path
import cv2
import json
import utils
from PIL import Image
from glob import glob
import sys
import datetime
import os
WAYMO_CLASSES = ['unknown', 'vehicle', 'pedestrian', 'sign', 'cyclist']
def get_camera_labels(frame):
if frame.camera_labels:
return frame.camera_labels
return frame.projected_lidar_labels
def extract_segment_frontcamera(tfrecord_files, out_dir, step):
images = []
annotations = []
categories = [{'id': i, 'name': n} for i, n in enumerate(WAYMO_CLASSES)][1:]
image_globeid=0
for segment_path in tfrecord_files:
print(f'extracting {segment_path}')
segment_path=Path(segment_path)#convert str to Path object
segment_name = segment_path.name
print(segment_name)
segment_out_dir = out_dir # remove segment_name as one folder, duplicate with image name
# segment_out_dir = out_dir / segment_name
# print(segment_out_dir)#output path + segment_name(with tfrecord)
# segment_out_dir.mkdir(parents=True, exist_ok=True)
dataset = tf.data.TFRecordDataset(str(segment_path), compression_type='')
for i, data in enumerate(dataset):
if i % step != 0:
continue
print('.', end='', flush=True)
frame = open_dataset.Frame()
frame.ParseFromString(bytearray(data.numpy()))
#get one frame
context_name = frame.context.name
frame_timestamp_micros = str(frame.timestamp_micros)
for index, image in enumerate(frame.images):
if image.name != 1: #Only use front camera
continue
camera_name = open_dataset.CameraName.Name.Name(image.name)
image_globeid = image_globeid + 1
#print("camera name:", camera_name)
img = tf.image.decode_jpeg(image.image).numpy()
image_name='_'.join([frame_timestamp_micros, camera_name])#image name
image_id = '/'.join([context_name, image_name]) #using "/" join, context_name is the folder
#New: do not use sub-folder
image_id = '_'.join([context_name, image_name])
#image_id = '/'.join([context_name, frame_timestamp_micros, camera_name]) #using "/" join
file_name = image_id + '.jpg'
#print(file_name)
filepath = out_dir / file_name
#filepath = segment_out_dir / file_name
#print('Image output path',filepath)
filepath.parent.mkdir(parents=True, exist_ok=True)
#images.append(dict(file_name=file_name, id=image_id, height=img.shape[0], width=img.shape[1], camera_name=camera_name))#new add camera_name
images.append(dict(file_name=file_name, id=image_globeid, height=img.shape[0], width=img.shape[1], camera_name=camera_name))#new add camera_name
print("current image id: ", image_globeid)
cv2.imwrite(str(filepath), img)
for camera_labels in get_camera_labels(frame):
# Ignore camera labels that do not correspond to this camera.
if camera_labels.name == image.name:
# Iterate over the individual labels.
for label in camera_labels.labels:
# object bounding box.
width = int(label.box.length)
height = int(label.box.width)
x = int(label.box.center_x - 0.5 * width)
y = int(label.box.center_y - 0.5 * height)
area = width * height
annotations.append(dict(image_id=image_globeid,
bbox=[x, y, width, height], area=area, category_id=label.type,
object_id=label.id,
tracking_difficulty_level=2 if label.tracking_difficulty_level == 2 else 1,
detection_difficulty_level=2 if label.detection_difficulty_level == 2 else 1))
with (segment_out_dir / 'annotations.json').open('w') as f:
for i, anno in enumerate(annotations):
anno['id'] = i #set as image frame ID
json.dump(dict(images=images, annotations=annotations, categories=categories), f)
def extract_segment_allcamera(tfrecord_files, out_dir, step):
images = []
annotations = []
categories = [{'id': i, 'name': n} for i, n in enumerate(WAYMO_CLASSES)][1:]
image_globeid=0
for segment_path in tfrecord_files:
print(f'extracting {segment_path}')
segment_path=Path(segment_path)#convert str to Path object
segment_name = segment_path.name
print(segment_name)
segment_out_dir = out_dir # remove segment_name as one folder, duplicate with image name
# segment_out_dir = out_dir / segment_name
# print(segment_out_dir)#output path + segment_name(with tfrecord)
# segment_out_dir.mkdir(parents=True, exist_ok=True)
dataset = tf.data.TFRecordDataset(str(segment_path), compression_type='')
for i, data in enumerate(dataset):
if i % step != 0:
continue
print('.', end='', flush=True)
frame = open_dataset.Frame()
frame.ParseFromString(bytearray(data.numpy()))
#get one frame
context_name = frame.context.name
frame_timestamp_micros = str(frame.timestamp_micros)
for index, image in enumerate(frame.images):
camera_name = open_dataset.CameraName.Name.Name(image.name)
image_globeid = image_globeid + 1
#print("camera name:", camera_name)
img = tf.image.decode_jpeg(image.image).numpy()
image_name='_'.join([frame_timestamp_micros, camera_name])#image name
image_id = '/'.join([context_name, image_name]) #using "/" join, context_name is the folder
#New: use sub-folder
#image_id = '_'.join([context_name, image_name])
image_id = '/'.join([context_name, frame_timestamp_micros, camera_name]) #using "/" join
file_name = image_id + '.jpg'
#print(file_name)
filepath = out_dir / file_name
#filepath = segment_out_dir / file_name
#print('Image output path',filepath)
filepath.parent.mkdir(parents=True, exist_ok=True)
#images.append(dict(file_name=file_name, id=image_id, height=img.shape[0], width=img.shape[1], camera_name=camera_name))#new add camera_name
images.append(dict(file_name=file_name, id=image_globeid, height=img.shape[0], width=img.shape[1], camera_name=camera_name))#new add camera_name
print("current image id: ", image_globeid)
cv2.imwrite(str(filepath), img)
for camera_labels in get_camera_labels(frame):
# Ignore camera labels that do not correspond to this camera.
if camera_labels.name == image.name:
# Iterate over the individual labels.
for label in camera_labels.labels:
# object bounding box.
width = int(label.box.length)
height = int(label.box.width)
x = int(label.box.center_x - 0.5 * width)
y = int(label.box.center_y - 0.5 * height)
area = width * height
annotations.append(dict(image_id=image_globeid,
bbox=[x, y, width, height], area=area, category_id=label.type,
object_id=label.id,
tracking_difficulty_level=2 if label.tracking_difficulty_level == 2 else 1,
detection_difficulty_level=2 if label.detection_difficulty_level == 2 else 1))
with (segment_out_dir / 'annotations.json').open('w') as f:
for i, anno in enumerate(annotations):
anno['id'] = i #set as image frame ID
json.dump(dict(images=images, annotations=annotations, categories=categories), f)
def extract_segment_allfrontcamera(PATH,folderslist, out_dir, step):
#folderslist = ["training_0031","training_0030","training_0029","training_0028","training_0027","training_0026"]
#PATH='/data/cmpe295-liu/Waymo'
images = []
annotations = []
categories = [{'id': i, 'name': n} for i, n in enumerate(WAYMO_CLASSES)][1:]
image_globeid=0
for index in range(len(folderslist)):
foldername=folderslist[index]
print("Folder name:", foldername)
tfrecord_files = glob(os.path.join(PATH, foldername, "*.tfrecord")) #[path for path in glob(os.path.join(PATH, foldername, "*.tfrecord"))]
print("Num of tfrecord file:", len(tfrecord_files))
#print(tfrecord_files)
for segment_path in tfrecord_files:
print(f'extracting {segment_path}')
segment_path=Path(segment_path)#convert str to Path object
segment_name = segment_path.name
print(segment_name)
segment_out_dir = out_dir # remove segment_name as one folder, duplicate with image name
# segment_out_dir = out_dir / segment_name
# print(segment_out_dir)#output path + segment_name(with tfrecord)
# segment_out_dir.mkdir(parents=True, exist_ok=True)
dataset = tf.data.TFRecordDataset(str(segment_path), compression_type='')
for i, data in enumerate(dataset):
if i % step != 0:
continue
print('.', end='', flush=True)
frame = open_dataset.Frame()
frame.ParseFromString(bytearray(data.numpy()))
#get one frame
context_name = frame.context.name
frame_timestamp_micros = str(frame.timestamp_micros)
for index, image in enumerate(frame.images):
if image.name != 1: #Only use front camera
continue
camera_name = open_dataset.CameraName.Name.Name(image.name)
image_globeid = image_globeid + 1
#print("camera name:", camera_name)
img = tf.image.decode_jpeg(image.image).numpy()
image_name='_'.join([frame_timestamp_micros, camera_name])#image name
#image_id = '/'.join([context_name, image_name]) #using "/" join, context_name is the folder
#New: do not use sub-folder
image_id = '_'.join([context_name, image_name])
#image_id = '/'.join([context_name, frame_timestamp_micros, camera_name]) #using "/" join
file_name = image_id + '.jpg'
#print(file_name)
file_name = '/'.join([foldername, file_name])
filepath = out_dir / file_name
#filepath = segment_out_dir / file_name
#print('Image output path',filepath)
filepath.parent.mkdir(parents=True, exist_ok=True)
#images.append(dict(file_name=file_name, id=image_id, height=img.shape[0], width=img.shape[1], camera_name=camera_name))#new add camera_name
images.append(dict(file_name=file_name, id=image_globeid, height=img.shape[0], width=img.shape[1], camera_name=camera_name))#new add camera_name
#print("current image id: ", image_globeid)
cv2.imwrite(str(filepath), img)
for camera_labels in get_camera_labels(frame):
# Ignore camera labels that do not correspond to this camera.
if camera_labels.name == image.name:
# Iterate over the individual labels.
for label in camera_labels.labels:
# object bounding box.
width = int(label.box.length)
height = int(label.box.width)
x = int(label.box.center_x - 0.5 * width)
y = int(label.box.center_y - 0.5 * height)
area = width * height
annotations.append(dict(image_id=image_globeid,
bbox=[x, y, width, height], area=area, category_id=label.type,
object_id=label.id,
tracking_difficulty_level=2 if label.tracking_difficulty_level == 2 else 1,
detection_difficulty_level=2 if label.detection_difficulty_level == 2 else 1))
with (segment_out_dir / 'annotations.json').open('w') as f:
for i, anno in enumerate(annotations):
anno['id'] = i #set as image frame ID
json.dump(dict(images=images, annotations=annotations, categories=categories), f)
!rm -r /data/cmpe295-liu/WaymoExport
!rm -r /data/cmpe295-liu/WaymoExportAll/
!mkdir /data/cmpe295-liu/Waymo/WaymoCOCOsmall
!rm -r /data/cmpe295-liu/Waymo/WaymoCOCOsmall/Training
folderslist = ["training_0031","training_0030","training_0029","training_0028","training_0027","training_0026"]
PATH='/data/cmpe295-liu/Waymo'
for index in range(len(folderslist)):
foldername=folderslist[index]
print(foldername)
tfrecord_files = glob(os.path.join(PATH, foldername, "*.tfrecord")) #[path for path in glob(os.path.join(PATH, foldername, "*.tfrecord"))]
print(tfrecord_files)
len(folderslist)
folderslist[1]
foldername="training_0031"
tfrecord_files = glob(os.path.join(PATH, foldername, "*.tfrecord")) #[path for path in glob(os.path.join(PATH, foldername, "*.tfrecord"))]
print(tfrecord_files)
PATH='/data/cmpe295-liu/Waymo'
folderslist = ["training_0031","training_0030","training_0029","training_0028","training_0027","training_0026"]
#folderslist = ["training_0031","training_0030","training_0029","training_0028","training_0027","training_0026","training_0025", "training_0024", "training_0023","training_0022","training_0021","training_0020","training_0019","training_0018","training_0017","training_0016","training_0015","training_0014","training_0013","training_0012","training_0011","training_0010","training_0009","training_0008","training_0007","training_0006","training_0005","training_0004","training_0003","training_0002","training_0001","training_0000"]
tfrecord_files = [path for x in folderslist for path in glob(os.path.join(PATH, x, "*.tfrecord"))]
print(len(tfrecord_files))#total number of tfrecord files
out_dir='/data/cmpe295-liu/Waymo/WaymoCOCOsmall/Training'
step=5 #downsample
out_dir = Path(out_dir)
extract_segment_frontcamera(tfrecord_files, out_dir, step)
PATH='/data/cmpe295-liu/Waymo'
folderslist = ["validation_0007","training_0006"]#,"training_0029","training_0028","training_0027","training_0026"]
#folderslist = ["training_0031","training_0030","training_0029","training_0028","training_0027","training_0026","training_0025", "training_0024", "training_0023","training_0022","training_0021","training_0020","training_0019","training_0018","training_0017","training_0016","training_0015","training_0014","training_0013","training_0012","training_0011","training_0010","training_0009","training_0008","training_0007","training_0006","training_0005","training_0004","training_0003","training_0002","training_0001","training_0000"]
tfrecord_files = [path for x in folderslist for path in glob(os.path.join(PATH, x, "*.tfrecord"))]
print(len(tfrecord_files))#total number of tfrecord files
out_dir='/data/cmpe295-liu/Waymo/WaymoCOCOsmall/Validation'
step=5 #downsample
out_dir = Path(out_dir)
extract_segment_frontcamera(tfrecord_files, out_dir, step)
PATH='/data/cmpe295-liu/Waymo'
#folderslist = ["training_0031","training_0030"]#,"training_0029","training_0028","training_0027","training_0026"]
folderslist = ["training_0031","training_0030","training_0029","training_0028","training_0027","training_0026","training_0025", "training_0024", "training_0023","training_0022","training_0021","training_0020","training_0019","training_0018","training_0017","training_0016","training_0015","training_0014","training_0013","training_0012","training_0011","training_0010","training_0009","training_0008","training_0007","training_0006","training_0005","training_0004","training_0003","training_0002","training_0001","training_0000"]
tfrecord_files = [path for x in folderslist for path in glob(os.path.join(PATH, x, "*.tfrecord"))]
print(len(tfrecord_files))#total number of tfrecord files
out_dir='/data/cmpe295-liu/Waymo/WaymoCOCO/Training'
step=5 #downsample
out_dir = Path(out_dir)
extract_segment_allfrontcamera(PATH,folderslist, out_dir, step)
folderslist = validation_folders = ["validation_0000","validation_0001","validation_0002","validation_0003","validation_0004","validation_0005", "validation_0006", "validation_0007"]
tfrecord_files = [path for x in folderslist for path in glob(os.path.join(PATH, x, "*.tfrecord"))]
print(len(tfrecord_files))#total number of tfrecord files
out_dir='/data/cmpe295-liu/Waymo/WaymoCOCO/Validation'
step=5 #downsample
out_dir = Path(out_dir)
extract_segment_allfrontcamera(PATH,folderslist, out_dir, step)
#extract_segment_frontcamera(tfrecord_files, out_dir, step)
!ls /data/cmpe295-liu/Waymo/WaymoCOCOsmall/Validation
!pwd
!python /home/010796032/PytorchWork/WaymoDetectron2Train.py
FULL_LABEL_CLASSES = ['unknown', 'vehicle', 'pedestrian', 'sign', 'cyclist']
len(FULL_LABEL_CLASSES)
```
| github_jupyter |
# Random Signals
*This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing. Please direct questions and suggestions to [Sascha.Spors@uni-rostock.de](mailto:Sascha.Spors@uni-rostock.de).*
## Auto-Power Spectral Density
The (auto-) [power spectral density](https://en.wikipedia.org/wiki/Spectral_density#Power_spectral_density) (PSD) is defined as the Fourier transformation of the [auto-correlation function](correlation_functions.ipynb) (ACF).
### Definition
For a continuous-amplitude, real-valued, wide-sense stationary (WSS) random signal $x[k]$ the PSD is given as
\begin{equation}
\Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \mathcal{F}_* \{ \varphi_{xx}[\kappa] \},
\end{equation}
where $\mathcal{F}_* \{ \cdot \}$ denotes the [discrete-time Fourier transformation](https://en.wikipedia.org/wiki/Discrete-time_Fourier_transform) (DTFT) and $\varphi_{xx}[\kappa]$ the ACF of $x[k]$. Note that the DTFT is performed with respect to $\kappa$. The ACF of a random signal of finite length $N$ can be expressed by way of a linear convolution
\begin{equation}
\varphi_{xx}[\kappa] = \frac{1}{N} \cdot x_N[k] * x_N[-k].
\end{equation}
Taking the DTFT of the left- and right-hand side results in
\begin{equation}
\Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \frac{1}{N} \, X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega})\, X_N(\mathrm{e}^{-\,\mathrm{j}\,\Omega}) =
\frac{1}{N} \, | X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega}) |^2.
\end{equation}
The last equality results from the definition of the magnitude and the symmetry of the DTFT for real-valued signals. The spectrum $X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ quantifies the amplitude density of the signal $x_N[k]$. It can be concluded from the above result that the PSD quantifies the squared amplitude or power density of a random signal. This explains the term power spectral density.
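A quick numerical sanity check of this relation: the magnitude of the DFT of the biased ACF estimate coincides with $\frac{1}{N} |X_N|^2$ computed from the zero-padded signal, and its mean reproduces the quadratic mean of the signal (cf. property 4 below).

```
import numpy as np

np.random.seed(0)
N = 256
x = np.random.normal(size=N)

# biased ACF estimate with lags -(N-1), ..., (N-1)
acf = 1/N * np.correlate(x, x, mode='full')

# taking the magnitude removes the phase factor caused by the lag offset of the stored ACF
psd_from_acf = np.abs(np.fft.fft(acf))

# PSD directly from the zero-padded signal spectrum, 1/N |X_N|^2
X = np.fft.fft(x, n=2*N-1)
psd_from_spectrum = np.abs(X)**2 / N

print(np.allclose(psd_from_acf, psd_from_spectrum))           # True
print(np.isclose(np.mean(x**2), np.mean(psd_from_spectrum)))  # True, quadratic mean = mean PSD
```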
### Properties
The properties of the PSD can be deduced from the properties of the ACF and the DTFT as:
1. From the link between the PSD $\Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ and the spectrum $X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ derived above it can be concluded that the PSD is real valued
$$\Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) \in \mathbb{R}$$
2. From the even symmetry $\varphi_{xx}[\kappa] = \varphi_{xx}[-\kappa]$ of the ACF it follows that
$$ \Phi_{xx}(\mathrm{e}^{\,\mathrm{j} \, \Omega}) = \Phi_{xx}(\mathrm{e}^{\,-\mathrm{j}\, \Omega}) $$
3. The PSD of an uncorrelated random signal is given as
$$ \Phi_{xx}(\mathrm{e}^{\,\mathrm{j} \, \Omega}) = \sigma_x^2 + \mu_x^2 \cdot {\bot \!\! \bot \!\! \bot}\left( \frac{\Omega}{2 \pi} \right) ,$$
which can be deduced from the [ACF of an uncorrelated signal](correlation_functions.ipynb#Properties).
4. The quadratic mean of a random signal is given as
$$ E\{ x[k]^2 \} = \varphi_{xx}[\kappa=0] = \frac{1}{2\pi} \int\limits_{-\pi}^{\pi} \Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\, \Omega}) \,\mathrm{d} \Omega $$
The last relation can be found by expressing the ACF via the inverse DTFT of $\Phi_{xx}$ and considering that $\mathrm{e}^{\mathrm{j} \Omega \kappa} = 1$ when evaluating the integral for $\kappa=0$.
### Example - Power Spectral Density of a Speech Signal
In this example the PSD $\Phi_{xx}(\mathrm{e}^{\,\mathrm{j} \,\Omega})$ of a speech signal of length $N$ is estimated by applying a discrete Fourier transformation (DFT) to its ACF. For a better interpretation of the PSD, the frequency axis $f = \frac{\Omega}{2 \pi} \cdot f_s$ has been chosen for illustration, where $f_s$ denotes the sampling frequency of the signal. The speech signal constitutes a recording of the vowel 'o' spoken from a German male, loaded into variable `x`.
In Python the ACF is stored in a vector with indices $0, 1, \dots, 2N - 2$ corresponding to the lags $\kappa = (0, 1, \dots, 2N - 2)^\mathrm{T} - (N-1)$. When computing the discrete Fourier transform (DFT) of the ACF numerically by the fast Fourier transform (FFT) one has to take this shift into account. For instance, by multiplying the DFT $\Phi_{xx}[\mu]$ by $\mathrm{e}^{\mathrm{j} \mu \frac{2 \pi}{2N - 1} (N-1)}$.
```
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
# read audio file
fs, x = wavfile.read('../data/vocal_o_8k.wav')
x = np.asarray(x, dtype=float)
N = len(x)
# compute ACF
acf = 1/N * np.correlate(x, x, mode='full')
# compute PSD
psd = np.fft.fft(acf)
psd = psd * np.exp(1j*np.arange(2*N-1)*2*np.pi*(N-1)/(2*N-1))
f = np.fft.fftfreq(2*N-1, d=1/fs)
# plot PSD
plt.figure(figsize=(10, 4))
plt.plot(f, np.real(psd))
plt.title('Estimated power spectral density')
plt.ylabel(r'$\hat{\Phi}_{xx}(e^{j \Omega})$')
plt.xlabel(r'$f / Hz$')
plt.axis([0, 500, 0, 1.1*max(np.abs(psd))])
plt.grid()
```
**Exercise**
* What does the PSD tell you about the average spectral contents of a speech signal?
Solution: The speech signal exhibits a harmonic structure with the dominant fundamental frequency $f_0 \approx 100$ Hz and a number of harmonics $f_n \approx n \cdot f_0$ for $n > 0$. This is due to the fact that vowels generate random signals which are, to a good approximation, periodic. To generate vowels, the sound produced by the periodically vibrating vocal folds is filtered by the resonance volumes and articulators above the voice box. The spectrum of periodic signals is a line spectrum.
## Cross-Power Spectral Density
The cross-power spectral density is defined as the Fourier transformation of the [cross-correlation function](correlation_functions.ipynb#Cross-Correlation-Function) (CCF).
### Definition
For two continuous-amplitude, real-valued, wide-sense stationary (WSS) random signals $x[k]$ and $y[k]$, the cross-power spectral density is given as
\begin{equation}
\Phi_{xy}(\mathrm{e}^{\,\mathrm{j} \, \Omega}) = \mathcal{F}_* \{ \varphi_{xy}[\kappa] \},
\end{equation}
where $\varphi_{xy}[\kappa]$ denotes the CCF of $x[k]$ and $y[k]$. Note again, that the DTFT is performed with respect to $\kappa$. The CCF of two random signals of finite length $N$ and $M$ can be expressed by way of a linear convolution
\begin{equation}
\varphi_{xy}[\kappa] = \frac{1}{N} \cdot x_N[k] * y_M[-k].
\end{equation}
Note that the chosen $\frac{1}{N}$-averaging convention corresponds to the length of the signal $x$. If $N \neq M$, care should be taken in interpreting this normalization. In case of $N=M$ the $\frac{1}{N}$-averaging yields a [biased estimator](https://en.wikipedia.org/wiki/Bias_of_an_estimator) of the CCF, which consistently should be denoted with $\hat{\varphi}_{xy,\mathrm{biased}}[\kappa]$.
Taking the DTFT of the left- and right-hand side of the above cross-correlation results in
\begin{equation}
\Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \frac{1}{N} \, X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega})\, Y_M(\mathrm{e}^{-\,\mathrm{j}\,\Omega}).
\end{equation}
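The analogous numerical check for the cross-power spectral density: the magnitude of the DFT of the biased CCF estimate equals $\frac{1}{N} |X_N| \, |Y_M|$ computed from the zero-padded spectra.

```
import numpy as np

np.random.seed(0)
N, M = 64, 512
x = np.random.normal(size=N)
y = np.random.normal(size=M)

ccf = 1/N * np.correlate(x, y, mode='full')         # lags -(M-1), ..., (N-1)
csd_from_ccf = np.abs(np.fft.fft(ccf))              # magnitude removes the lag-offset phase

X = np.fft.fft(x, n=N+M-1)
Y = np.fft.fft(y, n=N+M-1)
csd_from_spectra = np.abs(X) * np.abs(Y) / N

print(np.allclose(csd_from_ccf, csd_from_spectra))  # True
```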
### Properties
1. The symmetries of $\Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\, \Omega})$ can be derived from the symmetries of the CCF and the DTFT as
$$ \underbrace {\Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\, \Omega}) = \Phi_{xy}^*(\mathrm{e}^{-\,\mathrm{j}\, \Omega})}_{\varphi_{xy}[\kappa] \in \mathbb{R}} =
\underbrace {\Phi_{yx}(\mathrm{e}^{\,- \mathrm{j}\, \Omega}) = \Phi_{yx}^*(\mathrm{e}^{\,\mathrm{j}\, \Omega})}_{\varphi_{yx}[-\kappa] \in \mathbb{R}},$$
from which $|\Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\, \Omega})| = |\Phi_{yx}(\mathrm{e}^{\,\mathrm{j}\, \Omega})|$ can be concluded.
2. The cross PSD of two uncorrelated random signals is given as
$$ \Phi_{xy}(\mathrm{e}^{\,\mathrm{j} \, \Omega}) = \mu_x^2 \mu_y^2 \cdot {\bot \!\! \bot \!\! \bot}\left( \frac{\Omega}{2 \pi} \right) $$
which can be deduced from the CCF of an uncorrelated signal.
### Example - Cross-Power Spectral Density
The following example estimates and plots the cross PSD $\Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\, \Omega})$ of two random signals $x_N[k]$ and $y_M[k]$ of finite lengths $N = 64$ and $M = 512$.
```
N = 64 # length of x
M = 512 # length of y
# generate two uncorrelated random signals
np.random.seed(1)
x = 2 + np.random.normal(size=N)
y = 3 + np.random.normal(size=M)
N = len(x)
M = len(y)
# compute cross PSD via CCF
acf = 1/N * np.correlate(x, y, mode='full')
psd = np.fft.fft(acf)
psd = psd * np.exp(1j*np.arange(N+M-1)*2*np.pi*(M-1)/(2*M-1))
psd = np.fft.fftshift(psd)
Om = 2*np.pi * np.arange(0, N+M-1) / (N+M-1)
Om = Om - np.pi
# plot results
plt.figure(figsize=(10, 4))
plt.stem(Om, np.abs(psd), basefmt='C0:', use_line_collection=True)
plt.title('Biased estimator of cross power spectral density')
plt.ylabel(r'$|\hat{\Phi}_{xy}(e^{j \Omega})|$')
plt.xlabel(r'$\Omega$')
plt.grid()
```
**Exercise**
* What does the cross PSD $\Phi_{xy}(\mathrm{e}^{\,\mathrm{j} \, \Omega})$ tell you about the statistical properties of the two random signals?
Solution: The cross PSD $\Phi_{xy}(\mathrm{e}^{\,\mathrm{j} \, \Omega})$ is essentially non-zero only for $\Omega=0$. It can hence be concluded that the two random signals are not mean-free and are uncorrelated with each other.
**Copyright**
This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Digital Signal Processing - Lecture notes featuring computational examples*.
| github_jupyter |
# Implementation of VGG16
> In this notebook I have implemented VGG16 on the CIFAR10 dataset using PyTorch
```
#importing libraries
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
import torch.optim as optim
import tqdm
import matplotlib.pyplot as plt
from torchvision.datasets import CIFAR10
from torch.utils.data import random_split
from torch.utils.data.dataloader import DataLoader
```
Load the data and do standard preprocessing steps, such as resizing the images and converting them into tensors
```
transform = transforms.Compose([transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485,0.456,0.406],
std=[0.229,0.224,0.225])])
train_ds = CIFAR10(root='data/',train = True,download=True,transform = transform)
val_ds = CIFAR10(root='data/',train = False,download=True,transform = transform)
batch_size = 128
train_loader = DataLoader(train_ds,batch_size,shuffle=True,num_workers=4,pin_memory=True)
val_loader = DataLoader(val_ds,batch_size,num_workers=4,pin_memory=True)
```
A custom utility class to print out the accuracy and losses during training and testing
```
def accuracy(outputs,labels):
_,preds = torch.max(outputs,dim=1)
return torch.tensor(torch.sum(preds==labels).item()/len(preds))
class ImageClassificationBase(nn.Module):
def training_step(self,batch):
images, labels = batch
out = self(images)
loss = F.cross_entropy(out,labels)
return loss
def validation_step(self,batch):
images, labels = batch
out = self(images)
loss = F.cross_entropy(out,labels)
acc = accuracy(out,labels)
return {'val_loss': loss.detach(),'val_acc': acc}
def validation_epoch_end(self,outputs):
batch_losses = [x['val_loss'] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean()
batch_accs = [x['val_acc'] for x in outputs]
epoch_acc = torch.stack(batch_accs).mean()
return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
def epoch_end(self, epoch, result):
print("Epoch [{}], train_loss: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}".format(
epoch, result['train_loss'], result['val_loss'], result['val_acc']))
```
### Creating a network
```
VGG_types = {
'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class VGG_net(ImageClassificationBase):
def __init__(self, in_channels=3, num_classes=1000):
super(VGG_net, self).__init__()
self.in_channels = in_channels
self.conv_layers = self.create_conv_layers(VGG_types['VGG16'])
self.fcs = nn.Sequential(
nn.Linear(512*7*7, 4096),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(4096, 4096),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(4096, num_classes)
)
def forward(self, x):
x = self.conv_layers(x)
x = x.reshape(x.shape[0], -1)
x = self.fcs(x)
return x
def create_conv_layers(self, architecture):
layers = []
in_channels = self.in_channels
for x in architecture:
if type(x) == int:
out_channels = x
layers += [nn.Conv2d(in_channels=in_channels,out_channels=out_channels,
kernel_size=(3,3), stride=(1,1), padding=(1,1)),
nn.BatchNorm2d(x),
nn.ReLU()]
in_channels = x
elif x == 'M':
layers += [nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))]
return nn.Sequential(*layers)
```
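As a quick sanity check of the network just defined (the dummy batch below is only for illustration), a 224x224 input is reduced by the five max-pool stages to the 7x7 maps expected by the classifier head:

```
vgg_check = VGG_net(in_channels=3, num_classes=10)
dummy_batch = torch.randn(2, 3, 224, 224)  # two random RGB images at the resized resolution
print(vgg_check(dummy_batch).shape)        # expected: torch.Size([2, 10])
```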
A custom function to pick a default device
```
def get_default_device():
"""Pick GPU if available else CPU"""
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
device = get_default_device()
device
def to_device(data,device):
"""Move tensors to chosen device"""
if isinstance(data,(list,tuple)):
return [to_device(x,device) for x in data]
return data.to(device,non_blocking=True)
for images, labels in train_loader:
print(images.shape)
images = to_device(images,device)
print(images.device)
break
class DeviceDataLoader():
"""Wrap a DataLoader to move data to a device"""
def __init__(self,dl,device):
self.dl = dl
self.device = device
def __iter__(self):
"""Yield a batch of data to a dataloader"""
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
"""Number of batches"""
return len(self.dl)
train_loader = DeviceDataLoader(train_loader,device)
val_loader = DeviceDataLoader(val_loader,device)
model = VGG_net(in_channels=3,num_classes=10)
to_device(model,device)
```
### Training the model
```
@torch.no_grad()
def evaluate(model, val_loader):
model.eval()
outputs = [model.validation_step(batch) for batch in val_loader]
return model.validation_epoch_end(outputs)
def fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.SGD):
    history = []
    optimizer = opt_func(model.parameters(), lr)
    for epoch in range(epochs):
        # Training Phase
        model.train()
        train_losses = []  # reset each epoch so the logged train_loss reflects only the current epoch
for batch in train_loader:
loss = model.training_step(batch)
train_losses.append(loss)
loss.backward()
optimizer.step()
optimizer.zero_grad()
# Validation phase
result = evaluate(model, val_loader)
result['train_loss'] = torch.stack(train_losses).mean().item()
model.epoch_end(epoch, result)
history.append(result)
return history
history = [evaluate(model, val_loader)]
history
#history = fit(2,0.1,model,train_loader,val_loader)
```
| github_jupyter |
*Accompanying code examples of the book "Introduction to Artificial Neural Networks and Deep Learning: A Practical Guide with Applications in Python" by [Sebastian Raschka](https://sebastianraschka.com). All code examples are released under the [MIT license](https://github.com/rasbt/deep-learning-book/blob/master/LICENSE). If you find this content useful, please consider supporting the work by buying a [copy of the book](https://leanpub.com/ann-and-deeplearning).*
Other code examples and content are available on [GitHub](https://github.com/rasbt/deep-learning-book). The PDF and ebook versions of the book are available through [Leanpub](https://leanpub.com/ann-and-deeplearning).
```
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch
```
# Model Zoo -- CNN Gender Classifier (ResNet-50 Architecture, CelebA) with Data Parallelism
### Network Architecture
The network in this notebook is an implementation of the ResNet-50 [1] architecture on the CelebA face dataset [2] to train a gender classifier.
References
- [1] He, K., Zhang, X., Ren, S., & Sun, J. (2016). Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 770-778). ([CVPR Link](https://www.cv-foundation.org/openaccess/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html))
- [2] Zhang, K., Tan, L., Li, Z., & Qiao, Y. (2016). Gender and smile classification using deep convolutional neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops (pp. 34-38).
**Note that the CelebA images are 218 x 178, not 256 x 256. We resize to 128x128**
The following code implements the residual blocks with skip connections such that the input passed via the shortcut matches the dimensions of the main path's output, which allows the network to learn identity functions. Such a residual block is illustrated below:

The following code implements the residual blocks with skip connections such that the input passed via the shortcut is resized to match the dimensions of the main path's output. Such a residual block is illustrated below:

For a more detailed explanation see the other notebook, [resnet-ex-1.ipynb](resnet-ex-1.ipynb).
The image below illustrates the ResNet-34 architecture (from the He et al. paper):

While ResNet-34 has 34 layers as shown in the figure above, the 50-layer ResNet variant implemented in this notebook uses a 'bottleneck' approach instead of the basic residual blocks. Figure 5 from the He et al. paper illustrates the difference between a basic residual block (as used in ResNet-34) and the bottleneck block used in ResNet-50:

## Imports
```
import os
import time
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
import matplotlib.pyplot as plt
from PIL import Image
```
## Dataset
### Downloading the Dataset
Note that the CelebA face image dataset (~200,000 images) is relatively large (~1.3 Gb). The download links below are the ones provided by the author on the official CelebA website at http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html.
1) Download and unzip the file `img_align_celeba.zip`, which contains the images in jpeg format.
2) Download the `list_attr_celeba.txt` file, which contains the class labels
3) Download the `list_eval_partition.txt` file, which contains training/validation/test partitioning info
### Preparing the Dataset
```
df1 = pd.read_csv('list_attr_celeba.txt', sep="\s+", skiprows=1, usecols=['Male'])
# Make 0 (female) & 1 (male) labels instead of -1 & 1
df1.loc[df1['Male'] == -1, 'Male'] = 0
df1.head()
df2 = pd.read_csv('list_eval_partition.txt', sep="\s+", skiprows=0, header=None)
df2.columns = ['Filename', 'Partition']
df2 = df2.set_index('Filename')
df2.head()
df3 = df1.merge(df2, left_index=True, right_index=True)
df3.head()
df3.to_csv('celeba-gender-partitions.csv')
df4 = pd.read_csv('celeba-gender-partitions.csv', index_col=0)
df4.head()
df4.loc[df4['Partition'] == 0].to_csv('celeba-gender-train.csv')
df4.loc[df4['Partition'] == 1].to_csv('celeba-gender-valid.csv')
df4.loc[df4['Partition'] == 2].to_csv('celeba-gender-test.csv')
img = Image.open('img_align_celeba/000001.jpg')
print(np.asarray(img, dtype=np.uint8).shape)
plt.imshow(img);
```
### Implementing a Custom DataLoader Class
```
class CelebaDataset(Dataset):
"""Custom Dataset for loading CelebA face images"""
def __init__(self, csv_path, img_dir, transform=None):
df = pd.read_csv(csv_path, index_col=0)
self.img_dir = img_dir
self.csv_path = csv_path
self.img_names = df.index.values
self.y = df['Male'].values
self.transform = transform
def __getitem__(self, index):
img = Image.open(os.path.join(self.img_dir,
self.img_names[index]))
if self.transform is not None:
img = self.transform(img)
label = self.y[index]
return img, label
def __len__(self):
return self.y.shape[0]
# Note that transforms.ToTensor()
# already divides pixels by 255. internally
custom_transform = transforms.Compose([transforms.CenterCrop((178, 178)),
transforms.Resize((128, 128)),
#transforms.Grayscale(),
#transforms.Lambda(lambda x: x/255.),
transforms.ToTensor()])
train_dataset = CelebaDataset(csv_path='celeba-gender-train.csv',
img_dir='img_align_celeba/',
transform=custom_transform)
valid_dataset = CelebaDataset(csv_path='celeba-gender-valid.csv',
img_dir='img_align_celeba/',
transform=custom_transform)
test_dataset = CelebaDataset(csv_path='celeba-gender-test.csv',
img_dir='img_align_celeba/',
transform=custom_transform)
BATCH_SIZE=256*torch.cuda.device_count()
train_loader = DataLoader(dataset=train_dataset,
batch_size=BATCH_SIZE,
shuffle=True,
num_workers=4)
valid_loader = DataLoader(dataset=valid_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=4)
test_loader = DataLoader(dataset=test_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=4)
device = torch.device("cuda:0")
torch.manual_seed(0)
for epoch in range(2):
for batch_idx, (x, y) in enumerate(train_loader):
print('Epoch:', epoch+1, end='')
print(' | Batch index:', batch_idx, end='')
print(' | Batch size:', y.size()[0])
x = x.to(device)
y = y.to(device)
break
```
## Model
```
##########################
### SETTINGS
##########################
# Hyperparameters
random_seed = 1
learning_rate = 0.001
num_epochs = 5
# Architecture
num_features = 128*128
num_classes = 2
```
The following code cell that implements the ResNet-50 architecture is a derivative of the code provided at https://pytorch.org/docs/0.4.0/_modules/torchvision/models/resnet.html.
```
##########################
### MODEL
##########################
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1, padding=2)
self.fc = nn.Linear(2048 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, (2. / n)**.5)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
logits = self.fc(x)
probas = F.softmax(logits, dim=1)
return logits, probas
def resnet50(num_classes):
"""Constructs a ResNet-34 model."""
model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes)
return model
torch.manual_seed(random_seed)
##########################
### COST AND OPTIMIZER
##########################
#### DATA PARALLEL START ####
model = resnet50(num_classes)
if torch.cuda.device_count() > 1:
print("Using", torch.cuda.device_count(), "GPUs")
model = nn.DataParallel(model)
#### DATA PARALLEL END ####
model.to(device)
#### DATA PARALLEL START ####
cost_fn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
```
## Training
```
def compute_accuracy(model, data_loader):
correct_pred, num_examples = 0, 0
for i, (features, targets) in enumerate(data_loader):
features = features.to(device)
targets = targets.to(device)
logits, probas = model(features)
_, predicted_labels = torch.max(probas, 1)
num_examples += targets.size(0)
correct_pred += (predicted_labels == targets).sum()
return correct_pred.float()/num_examples * 100
start_time = time.time()
for epoch in range(num_epochs):
model.train()
for batch_idx, (features, targets) in enumerate(train_loader):
features = features.to(device)
targets = targets.to(device)
### FORWARD AND BACK PROP
logits, probas = model(features)
cost = cost_fn(logits, targets)
optimizer.zero_grad()
cost.backward()
### UPDATE MODEL PARAMETERS
optimizer.step()
### LOGGING
if not batch_idx % 50:
print ('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
%(epoch+1, num_epochs, batch_idx,
len(train_dataset)//BATCH_SIZE, cost))
model.eval()
with torch.set_grad_enabled(False): # save memory during inference
print('Epoch: %03d/%03d | Train: %.3f%% | Valid: %.3f%%' % (
epoch+1, num_epochs,
compute_accuracy(model, train_loader),
compute_accuracy(model, valid_loader)))
print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))
print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
```
## Evaluation
```
with torch.set_grad_enabled(False): # save memory during inference
print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader)))
for batch_idx, (features, targets) in enumerate(test_loader):
features = features
targets = targets
break
plt.imshow(np.transpose(features[0], (1, 2, 0)))
model.eval()
logits, probas = model(features.to(device)[0, None])
print('Probability Female %.2f%%' % (probas[0][0]*100))
```
| github_jupyter |
# REINFORCE in PyTorch
Just like we did before for Q-learning, this time we'll design a PyTorch network to learn `CartPole-v0` via policy gradient (REINFORCE).
Most of the code in this notebook is taken from approximate Q-learning, so you'll find it more or less familiar and even simpler.
```
import sys, os
if 'google.colab' in sys.modules and not os.path.exists('.setup_complete'):
!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/master/setup_colab.sh -O- | bash
!touch .setup_complete
# This code creates a virtual display to draw game images on.
# It will have no effect if your machine has a monitor.
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
!bash ../xvfb start
os.environ['DISPLAY'] = ':1'
import gym
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
A caveat: with some versions of `pyglet`, the following cell may crash with `NameError: name 'base' is not defined`. The corresponding bug report is [here](https://github.com/pyglet/pyglet/issues/134). If you see this error, try restarting the kernel.
```
env = gym.make("CartPole-v0")
# gym compatibility: unwrap TimeLimit
if hasattr(env, '_max_episode_steps'):
env = env.env
env.reset()
n_actions = env.action_space.n
state_dim = env.observation_space.shape
plt.imshow(env.render("rgb_array"))
```
# Building the network for REINFORCE
For REINFORCE algorithm, we'll need a model that predicts action probabilities given states.
For numerical stability, please __do not include the softmax layer into your network architecture__.
We'll use softmax or log-softmax where appropriate.
```
import torch
import torch.nn as nn
# Build a simple neural network that predicts policy logits.
# Keep it simple: CartPole isn't worth deep architectures.
model = nn.Sequential(
<YOUR CODE: define a neural network that predicts policy logits>
)
```
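For reference, one possible way to fill in the cell above — a minimal sketch only; the hidden width of 64 is an arbitrary choice, and `state_dim` / `n_actions` come from the earlier cell:
```
import torch.nn as nn

# Two small hidden layers; the output is raw logits (no softmax), as requested above.
policy_sketch = nn.Sequential(
    nn.Linear(state_dim[0], 64),
    nn.ReLU(),
    nn.Linear(64, 64),
    nn.ReLU(),
    nn.Linear(64, n_actions),
)
```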
#### Predict function
Note: the output of this function is a numpy array, not a torch tensor.
So gradient calculation is not needed here.
<br>
Use [no_grad](https://pytorch.org/docs/stable/autograd.html#torch.autograd.no_grad)
to suppress gradient calculation.
<br>
Also, `.detach()` (or legacy `.data` property) can be used instead, but there is a difference:
<br>
With `.detach()` the computational graph is still built but then disconnected from the returned tensor,
so `.detach()` should be used if that graph is needed for backprop through some other (non-detached) tensor;
<br>
In contrast, no graph is built by any operation in `no_grad()` context, thus it's preferable here.
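A tiny self-contained illustration of the difference (not part of the exercise):
```
import torch

x = torch.ones(2, requires_grad=True)

with torch.no_grad():
    y = x * 2            # no computational graph is recorded here
print(y.requires_grad)   # False

z = (x * 2).detach()     # a graph is recorded for x * 2, then z is cut off from it
print(z.requires_grad)   # False
```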
```
def predict_probs(states):
"""
Predict action probabilities given states.
:param states: numpy array of shape [batch, state_shape]
:returns: numpy array of shape [batch, n_actions]
"""
# convert states, compute logits, use softmax to get probability
<YOUR CODE>
return <YOUR CODE>
test_states = np.array([env.reset() for _ in range(5)])
test_probas = predict_probs(test_states)
assert isinstance(test_probas, np.ndarray), \
"you must return np array and not %s" % type(test_probas)
assert tuple(test_probas.shape) == (test_states.shape[0], env.action_space.n), \
"wrong output shape: %s" % np.shape(test_probas)
assert np.allclose(np.sum(test_probas, axis=1), 1), "probabilities do not sum to 1"
```
### Play the game
We can now use our newly built agent to play the game.
```
def generate_session(env, t_max=1000):
"""
Play a full session with REINFORCE agent.
Returns sequences of states, actions, and rewards.
"""
# arrays to record session
states, actions, rewards = [], [], []
s = env.reset()
for t in range(t_max):
# action probabilities array aka pi(a|s)
action_probs = predict_probs(np.array([s]))[0]
# Sample action with given probabilities.
a = <YOUR CODE>
new_s, r, done, info = env.step(a)
# record session history to train later
states.append(s)
actions.append(a)
rewards.append(r)
s = new_s
if done:
break
return states, actions, rewards
# test it
states, actions, rewards = generate_session(env)
```
### Computing cumulative rewards
$$
\begin{align*}
G_t &= r_t + \gamma r_{t + 1} + \gamma^2 r_{t + 2} + \ldots \\
&= \sum_{i = t}^T \gamma^{i - t} r_i \\
&= r_t + \gamma * G_{t + 1}
\end{align*}
$$
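The last line is the whole trick: iterate backwards and reuse $G_{t+1}$. A small standalone sketch (named differently so it does not collide with the exercise function below):
```
import numpy as np

def discounted_returns(rewards, gamma=0.99):
    """Backward pass implementing G_t = r_t + gamma * G_{t+1}."""
    G = 0.0
    out = []
    for r in reversed(rewards):
        G = r + gamma * G
        out.append(G)
    return out[::-1]

# agrees with one of the test cases used further below
assert np.allclose(discounted_returns([0, 0, 1, 0, 0, 1, 0], gamma=0.9),
                   [1.40049, 1.5561, 1.729, 0.81, 0.9, 1.0, 0.0])
```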
```
def get_cumulative_rewards(rewards, # rewards at each step
gamma=0.99 # discount for reward
):
"""
Take a list of immediate rewards r(s,a) for the whole session
and compute cumulative returns (a.k.a. G(s,a) in Sutton '16).
G_t = r_t + gamma*r_{t+1} + gamma^2*r_{t+2} + ...
A simple way to compute cumulative rewards is to iterate from the last
    to the first timestep and compute G_t = r_t + gamma*G_{t+1} recurrently.
    You must return an array/list of cumulative rewards with as many elements as the initial rewards.
"""
<YOUR CODE>
return <YOUR CODE: array of cumulative rewards>
get_cumulative_rewards(rewards)
assert len(get_cumulative_rewards(list(range(100)))) == 100
assert np.allclose(
get_cumulative_rewards([0, 0, 1, 0, 0, 1, 0], gamma=0.9),
[1.40049, 1.5561, 1.729, 0.81, 0.9, 1.0, 0.0])
assert np.allclose(
get_cumulative_rewards([0, 0, 1, -2, 3, -4, 0], gamma=0.5),
[0.0625, 0.125, 0.25, -1.5, 1.0, -4.0, 0.0])
assert np.allclose(
get_cumulative_rewards([0, 0, 1, 2, 3, 4, 0], gamma=0),
[0, 0, 1, 2, 3, 4, 0])
print("looks good!")
```
#### Loss function and updates
We now need to define objective and update over policy gradient.
Our objective function is
$$ J \approx { 1 \over N } \sum_{s_i,a_i} G(s_i,a_i) $$
REINFORCE defines a way to compute the gradient of the expected reward with respect to policy parameters. The formula is as follows:
$$ \nabla_\theta \hat J(\theta) \approx { 1 \over N } \sum_{s_i, a_i} \nabla_\theta \log \pi_\theta (a_i \mid s_i) \cdot G_t(s_i, a_i) $$
We can abuse PyTorch's capabilities for automatic differentiation by defining our objective function as follows:
$$ \hat J(\theta) \approx { 1 \over N } \sum_{s_i, a_i} \log \pi_\theta (a_i \mid s_i) \cdot G_t(s_i, a_i) $$
When you compute the gradient of that function with respect to network weights $\theta$, it will become exactly the policy gradient.
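As a toy, self-contained illustration of that surrogate objective (made-up numbers, no entropy bonus; the actual loss, including the `entropy_coef` term, is left to the exercise below):
```
import torch

log_pi = torch.log(torch.tensor([0.7, 0.4, 0.9]))   # log pi(a_i | s_i)
returns = torch.tensor([1.0, 2.0, 0.5])              # G(s_i, a_i)

surrogate = (log_pi * returns).mean()   # J_hat
loss = -surrogate                       # minimizing -J_hat == maximizing J_hat
print(loss)
```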
```
def to_one_hot(y_tensor, ndims):
""" helper: take an integer vector and convert it to 1-hot matrix. """
y_tensor = y_tensor.type(torch.LongTensor).view(-1, 1)
y_one_hot = torch.zeros(
y_tensor.size()[0], ndims).scatter_(1, y_tensor, 1)
return y_one_hot
# Your code: define optimizers
optimizer = torch.optim.Adam(model.parameters(), 1e-3)
def train_on_session(states, actions, rewards, gamma=0.99, entropy_coef=1e-2):
"""
Takes a sequence of states, actions and rewards produced by generate_session.
Updates agent's weights by following the policy gradient above.
Please use Adam optimizer with default parameters.
"""
# cast everything into torch tensors
states = torch.tensor(states, dtype=torch.float32)
actions = torch.tensor(actions, dtype=torch.int32)
cumulative_returns = np.array(get_cumulative_rewards(rewards, gamma))
cumulative_returns = torch.tensor(cumulative_returns, dtype=torch.float32)
# predict logits, probas and log-probas using an agent.
logits = model(states)
probs = nn.functional.softmax(logits, -1)
log_probs = nn.functional.log_softmax(logits, -1)
assert all(isinstance(v, torch.Tensor) for v in [logits, probs, log_probs]), \
"please use compute using torch tensors and don't use predict_probs function"
# select log-probabilities for chosen actions, log pi(a_i|s_i)
log_probs_for_actions = torch.sum(
log_probs * to_one_hot(actions, env.action_space.n), dim=1)
    # Compute loss here. Don't forget entropy regularization with `entropy_coef`
entropy = <YOUR CODE>
loss = <YOUR CODE>
# Gradient descent step
<YOUR CODE>
# technical: return session rewards to print them later
return np.sum(rewards)
```
### The actual training
```
for i in range(100):
rewards = [train_on_session(*generate_session(env)) for _ in range(100)] # generate new sessions
print("mean reward:%.3f" % (np.mean(rewards)))
if np.mean(rewards) > 500:
print("You Win!") # but you can train even further
break
```
### Results & video
```
# Record sessions
import gym.wrappers
with gym.wrappers.Monitor(gym.make("CartPole-v0"), directory="videos", force=True) as env_monitor:
sessions = [generate_session(env_monitor) for _ in range(100)]
# Show video. This may not work in some setups. If it doesn't
# work for you, you can download the videos and view them locally.
from pathlib import Path
from base64 import b64encode
from IPython.display import HTML
video_paths = sorted([s for s in Path('videos').iterdir() if s.suffix == '.mp4'])
video_path = video_paths[-1] # You can also try other indices
if 'google.colab' in sys.modules:
# https://stackoverflow.com/a/57378660/1214547
with video_path.open('rb') as fp:
mp4 = fp.read()
data_url = 'data:video/mp4;base64,' + b64encode(mp4).decode()
else:
data_url = str(video_path)
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format(data_url))
```
| github_jupyter |
# Equivalent layer technique for estimating total magnetization direction: Analysis of the result
## Importing libraries
```
% matplotlib inline
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import cPickle as pickle
import datetime
import timeit
import string as st
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from fatiando.gridder import regular
notebook_name = 'airborne_EQL_magdirection_RM_analysis.ipynb'
```
## Plot style
```
plt.style.use('ggplot')
```
## Importing my package
```
dir_modules = '../../../mypackage'
sys.path.append(dir_modules)
import auxiliary_functions as fc
```
## Loading model
```
with open('data/model_multi.pickle') as f:
model_multi = pickle.load(f)
```
## Loading observation points
```
with open('data/airborne_survey.pickle') as f:
airborne = pickle.load(f)
```
## Loading data set
```
with open('data/data_set.pickle') as f:
data = pickle.load(f)
```
## Loading results
```
with open('data/result_RM_airb.pickle') as f:
results = pickle.load(f)
```
## List of saved files
```
saved_files = []
```
## Observation area
```
print 'Area limits: \n x_max = %.1f m \n x_min = %.1f m \n y_max = %.1f m \n y_min = %.1f m' % (airborne['area'][1],
airborne['area'][0],
airborne['area'][3],
airborne['area'][2])
```
## Airborne survey information
```
print 'Shape : (%.0f,%.0f)'% airborne['shape']
print 'Number of data: %.1f' % airborne['N']
print 'dx: %.1f m' % airborne['dx']
print 'dy: %.1f m ' % airborne['dy']
```
## Properties of the model
### Main field
```
inc_gf,dec_gf = model_multi['main_field']
print'Main field inclination: %.1f degree' % inc_gf
print'Main field declination: %.1f degree' % dec_gf
```
### Magnetization direction
```
print 'Inclination: %.1f degree' % model_multi['inc_R']
print 'Declination: %.1f degree' % model_multi['dec_R']
inc_R,dec_R = model_multi['inc_R'],model_multi['dec_R']
```
### Coordinates equivalent sources
```
h = results['layer_depth']
shape_layer = (airborne['shape'][0],airborne['shape'][1])
xs,ys,zs = regular(airborne['area'],shape_layer,h)
```
## The best solution using L-curve
```
m_LM = results['magnetic_moment'][4]
inc_est = results['inc_est'][4]
dec_est = results['dec_est'][4]
mu = results['reg_parameter'][4]
phi = results['phi'][4]
print mu
```
## Visualization of the convergence
```
phi = (np.array(phi)/airborne['x'].size)
title_font = 22
bottom_font = 20
saturation_factor = 1.
plt.close('all')
plt.figure(figsize=(10,10), tight_layout=True)
plt.plot(phi,'b-',linewidth=1.5)
plt.title('Convergence', fontsize=title_font)
plt.xlabel('iteration', fontsize = title_font)
plt.ylabel('Goal function ', fontsize = title_font)
plt.tick_params(axis='both', which='major', labelsize=15)
file_name = 'figs/airborne/convergence_LM_NNLS_magRM'
plt.savefig(file_name+'.png',dpi=300)
saved_files.append(file_name+'.png')
plt.show()
```
## Estimated magnetization direction
```
print (inc_est,dec_est)
print (inc_R,dec_R)
```
## Comparison between observed data and predicted data
```
pred = fc.tfa_layer(airborne['x'],airborne['y'],airborne['z'],
xs,ys,zs,inc_gf,dec_gf,m_LM,inc_est,dec_est)
res = pred - data['tfa_obs_RM_airb']
r_norm,r_mean,r_std = fc.residual(data['tfa_obs_RM_airb'],pred)
title_font = 22
bottom_font = 20
plt.figure(figsize=(28,11), tight_layout=True)
ranges = np.abs([data['tfa_obs_RM_airb'].max(),
data['tfa_obs_RM_airb'].min(),
pred.max(), pred.min()]).max()
ranges_r = np.abs([res.max(),res.min()]).max()
## Observed data plot
ax1=plt.subplot(1,4,1)
plt.title('Observed data', fontsize=title_font)
plt.xlabel('y (km)',fontsize = title_font)
plt.ylabel('x (km)',fontsize = title_font)
plt.contourf(1e-3*airborne['y'].reshape(airborne['shape']),
1e-3*airborne['x'].reshape(airborne['shape']),
data['tfa_obs_RM_airb'].reshape(airborne['shape']),
30, cmap='viridis',vmin=-ranges, vmax=ranges)
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
cb = plt.colorbar(pad=0.01, aspect=40, shrink=1.0)
cb.set_label('nT',size=bottom_font)
cb.ax.tick_params(labelsize=bottom_font)
## Predicted data plot
ax2=plt.subplot(1,4,2)
plt.title('Predicted data', fontsize=title_font)
plt.xlabel('y (km)',fontsize = title_font)
plt.ylabel('x (km)',fontsize = title_font)
plt.contourf(1e-3*airborne['y'].reshape(airborne['shape']),
1e-3*airborne['x'].reshape(airborne['shape']),
pred.reshape(airborne['shape']),
30, cmap='viridis', vmin=-ranges, vmax=ranges)
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
cb = plt.colorbar(pad=0.01, aspect=40, shrink=1.0)
cb.set_label('nT',size=bottom_font)
cb.ax.tick_params(labelsize=bottom_font)
## Residuals plot and histogram
ax3=plt.subplot(1,4,3)
plt.title('Residuals map', fontsize=title_font)
plt.xlabel('y (km)',fontsize = title_font)
plt.ylabel('x (km)',fontsize = title_font)
plt.contourf(1e-3*airborne['y'].reshape(airborne['shape']),
1e-3*airborne['x'].reshape(airborne['shape']),
res.reshape(airborne['shape']),
30, cmap='viridis', vmin=-ranges_r, vmax=ranges_r)
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
cb = plt.colorbar(pad=0.01, aspect=40, shrink=1.0)
cb.set_label('nT',size=bottom_font)
cb.ax.tick_params(labelsize=bottom_font)
ax4=plt.subplot(1,4,4)
plt.title('Histogram of residuals', fontsize =title_font)
plt.xlabel('Residuals (nT)', fontsize = title_font)
plt.ylabel('Frequency', fontsize = title_font)
plt.text(0.02, 0.97, "mean = {:.2f}\nstd = {:.2f} ".format(np.mean(res), np.std(res)),
horizontalalignment='left',
verticalalignment='top',
transform = ax4.transAxes, fontsize=bottom_font)
n, bins, patches = plt.hist(res,bins=30, normed=True, facecolor='black')
gauss = mlab.normpdf(bins, 0., 10.)
plt.plot(bins, gauss, 'r-', linewidth=4.)
ax4.set_xticks([-100.0,-50.,0.0,50.,100.0])
ax4.set_yticks([.0,.010,.020,.030,.040,.05,.06])
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
##
file_name = 'figs/airborne/data_fitting_LM_NNLS_magRM'
plt.savefig(file_name+'.png',dpi=300)
saved_files.append(file_name+'.png')
plt.show()
```
## Positive magnetic-moment distribution
```
title_font = 22
bottom_font = 20
plt.close('all')
plt.figure(figsize=(10,10), tight_layout=True)
plt.title('Magnetic moment distribution', fontsize=title_font)
plt.contourf(1e-3*ys.reshape(shape_layer),1e-3*xs.reshape(shape_layer),
m_LM.reshape(shape_layer), 40, cmap='inferno')
cb = plt.colorbar(pad=0.01, aspect=40, shrink=1.0)
cb.set_label('$A.m^2$',size=bottom_font)
cb.ax.tick_params(labelsize=bottom_font)
plt.xlabel('y (km)', fontsize = title_font)
plt.ylabel('x (km)', fontsize = title_font)
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
file_name = 'figs/airborne/magnetic_moment_positive_LM_NNLS_magRM'
plt.savefig(file_name+'.png',dpi=300)
saved_files.append(file_name+'.png')
plt.show()
```
## Figure for paper
```
#title_font = 17
title_font = 5
#bottom_font = 14
bottom_font = 4
hist_font = 5
height_per_width = 17./15.
plt.figure(figsize=(4.33,4.33*height_per_width), tight_layout=True)
ranges = np.abs([data['tfa_obs_RM_airb'].max(),
data['tfa_obs_RM_airb'].min(),
pred.max(), pred.min()]).max()
ranges_r = np.abs([res.max(),res.min()]).max()
## Observed data plot
ax1=plt.subplot(3,2,1)
plt.title('(a) Observed data', fontsize=title_font)
plt.xlabel('y (km)',fontsize = title_font)
plt.ylabel('x (km)',fontsize = title_font)
plt.contourf(1e-3*airborne['y'].reshape(airborne['shape']),
1e-3*airborne['x'].reshape(airborne['shape']),
data['tfa_obs_RM_airb'].reshape(airborne['shape']),
30, cmap='viridis',vmin=-ranges, vmax=ranges)
cbar = plt.colorbar(pad=0.01, aspect=20, shrink=1.0)
cbar.set_label('nT',size=title_font)
cbar.ax.tick_params(labelsize=bottom_font)
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
plt.title('(a) Observed data', fontsize=title_font)
plt.xlabel('y (km)',fontsize = title_font)
plt.ylabel('x (km)',fontsize = title_font)
## Predicted data plot
ax2=plt.subplot(3,2,2)
plt.contourf(1e-3*airborne['y'].reshape(airborne['shape']),
1e-3*airborne['x'].reshape(airborne['shape']),
pred.reshape(airborne['shape']),
30, cmap='viridis', vmin=-ranges, vmax=ranges)
cbar = plt.colorbar(pad=0.01, aspect=20, shrink=1.0)
cbar.set_label('nT',size=title_font)
cbar.ax.tick_params(labelsize=bottom_font)
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
plt.title('(b) Predicted data', fontsize=title_font)
plt.xlabel('y (km)',fontsize = title_font)
plt.ylabel('x (km)',fontsize = title_font)
## Residuals plot and histogram
ax3=plt.subplot(3,2,3)
plt.contourf(1e-3*airborne['y'].reshape(airborne['shape']),
1e-3*airborne['x'].reshape(airborne['shape']),
res.reshape(airborne['shape']),
30, cmap='viridis', vmin=-ranges_r, vmax=ranges_r)
cbar = plt.colorbar(pad=0.01, aspect=20, shrink=1.0)
cbar.set_label('nT',size=title_font)
cbar.ax.tick_params(labelsize=bottom_font)
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
plt.title('(c) Residuals', fontsize=title_font)
plt.xlabel('y (km)',fontsize = title_font)
plt.ylabel('x (km)',fontsize = title_font)
ax4= plt.subplot(3,2,4)
plt.text(0.02, 0.97, "mean = {:.2f}\nstd = {:.2f} ".format(np.mean(res), np.std(res)),
horizontalalignment='left',
verticalalignment='top',
transform = ax4.transAxes, fontsize=hist_font)
n, bins, patches = plt.hist(res,bins=20, normed=True, facecolor='black')
gauss = mlab.normpdf(bins, 0., 10.)
plt.plot(bins, gauss, 'r-', linewidth=1.)
ax4.set_xticks([-100.0,-50.,0.0,50.,100.0])
ax4.set_yticks([.0,.010,.020,.030,.040,.05,.06])
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
plt.title('(d) Histogram of residuals', fontsize =title_font)
plt.xlabel('Residuals (nT)', fontsize = title_font)
plt.ylabel('Frequency', fontsize = title_font)
ax5= plt.subplot(3,2,5)
plt.contourf(1e-3*ys.reshape(shape_layer),1e-3*xs.reshape(shape_layer),
m_LM.reshape(shape_layer)*1e-9, 30, cmap='inferno')
cbar = plt.colorbar(pad=0.01, aspect=20, shrink=1.0)
cbar.set_label('$10^{9}$ A$\cdot$m$^2$',size=title_font)
cbar.ax.tick_params(labelsize=bottom_font)
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
plt.title('(e) Magnetic moment distribution', fontsize=title_font)
plt.xlabel('y (km)', fontsize = title_font)
plt.ylabel('x (km)', fontsize = title_font)
ax6= plt.subplot(3,2,6)
plt.plot(phi, 'b-',linewidth=1.0)
plt.tick_params(axis='both', which='major', labelsize=bottom_font)
plt.title('(f) Convergence', fontsize=title_font)
plt.xlabel('iteration', fontsize = title_font)
plt.ylabel('Goal function ', fontsize = title_font)
###########################################################################
#file_name = 'figs/airborne/results_compiled_LM_NNLS_magRM'
file_name = 'figs/airborne/Fig3'
plt.savefig(file_name+'.png',dpi=1200)
saved_files.append(file_name+'.png')
plt.savefig(file_name+'.eps',dpi=1200)
saved_files.append(file_name+'.eps')
plt.show()
```
| github_jupyter |
# BE 240 Lecture 4
# Sub-SBML
## Modeling diffusion, shared resources, and compartmentalized systems
## _Ayush Pandey_
```
# This notebook is designed to be converted to a HTML slide show
# To do this in the command prompt type (in the folder containing the notebook):
# jupyter nbconvert BE240_Lecture4_Sub-SBML.ipynb --to slides
```


# An example:
### Three different "subsystems" - each with its SBML model
### Another "signal in mixture" subsystem - models signal in the environment / mixture
### Using Sub-SBML we can obtain the combined model for such a system with
* transport across membrane
* shared resources : ATP, Ribosome etc
* resolve naming conflicts (Ribo, Ribosome, RNAP, RNAPolymerase etc.)

# Installing Sub-SBML
```
git clone https://github.com/BuildACell/subsbml.git
```
cd to `subsbml` directory then run the following command to install the package in your environment:
```
python setup.py install
```
# Dependencies:
1. python-libsbml : Run `pip install python-libsbml`, if you don't have it already. You probably already have this installed as it is also a dependency for bioscrape
1. A simulator: You will need a simulator of your choice to simulate the SBML models that Sub-SBML generates. Bioscrape is an example of a simulator and we will be using that for simulations.
# Update your bioscrape installation
From the bioscrape directory, run the following if you do not have a remote fork (your own Github fork of the original bioscrape repository, `biocircuits/bioscrape`). To list all remote repositories that your bioscrape directory is connected to, you can run `git remote -v`. The `origin` in the next two commands corresponds to the biocircuits/bioscrape Github repository (you should change it if your remote has a different name).
```
git pull origin master
python setup.py install
```
Update your BioCRNpyler installation as well - if you plan to use your own BioCRNpyler models with Sub-SBML. Run the same commands as for bioscrape from the BioCRNpyler directory.
## Sub-SBML notes:
## On "name" and "identifier":
> SBML elements can have a name and an identifier argument. A `name` is supposed to be a human readable name of the particular element in the model. On the other hand, an `identifier` is what the software tool reads. Hence, `identifier` argument in an SBML model is mandatory whereas `name` argument is optional.
Sub-SBML works with `name` arguments of various model components to figure out what components interact/get combined/shared etc. Bioscrape/BioCRNpyler and other common software tools generate SBML models with `name` arguments added to various components such as species, parameters. As an example, to combine two species, Sub-SBML looks at the names of the two species and if they are the same - they are combined together and given a new identifier but the name remains the same.
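A small illustration of the distinction using `python-libsbml` (already a dependency); the identifier and name below are made up:
```
import libsbml

doc = libsbml.SBMLDocument(3, 1)
model = doc.createModel()

sp = model.createSpecies()
sp.setId('species_1')   # machine-readable identifier (mandatory)
sp.setName('IPTG')      # human-readable name (optional) -- what Sub-SBML matches on

print(sp.getId(), sp.getName())
```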
## A simple Sub-SBML use case:
A simple example where we have two different models : transcription and translation. Using Sub-SBML, we can combine these two together and run simulations.
```
# Import statements
from subsbml.Subsystem import createNewSubsystem, createSubsystem
import numpy as np
import pylab as plt
```
## Transcription Model:
Consider the following simple transcription-only model where $G$ is a gene, $T$ is a transcript, and $S$ is the signaling molecule.
We can write the following reduced order dynamics:
1. $G \xrightarrow[]{\rho_{tx}(G, S)} G + T$;
\begin{align}
\rho_{tx}(G, S) = G K_{X}\frac{S^{2}}{K_{S}^{2}+S^{2}}
\\
\end{align}
Here, $S$ is the inducer signal that cooperatively activates the transcription of the gene $G$. Since this is a positive activation of the gene by the inducer, we have a positive proportional Hill function.
1. $T \xrightarrow[]{\delta} \varnothing$; mass-action kinetics at rate $\delta$.
## Translation model:
1. $T \xrightarrow[]{\rho_{tl}(T)} T+X$;
\begin{align}
\rho_{tl}(T) = K_{TR} \frac{T}{K_{R} + T}
\\
\end{align}
Here $X$ is the protein species.
The lumped parameters $K_{TR}$ and $K_R$ model effects due to ribosome saturation. This is the same kind of Hill function as the one derived in the enzymatic reaction example; a small numeric sketch of both propensities follows this list.
1. $X \xrightarrow[]{\delta} \varnothing$; mass-action kinetics at rate $\delta$.
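A small numeric sketch of the two propensities above (all parameter values are placeholders, not fitted values):
```
def rho_tx(G, S, K_X=1.0, K_S=1.0):
    """Transcription propensity: Hill activation of gene G by signal S."""
    return G * K_X * S**2 / (K_S**2 + S**2)

def rho_tl(T, K_TR=1.0, K_R=1.0):
    """Translation propensity: ribosome-saturation Hill function in transcript T."""
    return K_TR * T / (K_R + T)

print(rho_tx(G=1.0, S=10.0), rho_tl(T=2.0))
```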
```
# Import SBML models by creating Subsystem class objects
ss1 = createSubsystem('transcription_SBML_model.xml')
ss2 = createSubsystem('translation_SBML_model.xml')
ss1.renameSName('mRNA_T', 'T')
# Combine the two subsystems together
tx_tl_subsystem = ss1 + ss2
# The longer way to do the same thing:
# tx_tl_subsystem = createNewSubsystem()
# tx_tl_subsystem.combineSubsystems([ss1,ss2], verbose = True)
# Set signal concentration (input) - manually and get ID for protein X
X_id = tx_tl_subsystem.getSpeciesByName('X').getId()
# Writing a Subsystem to an SBML file (Export SBML)
_ = tx_tl_subsystem.writeSBML('txtl_ss.xml')
tx_tl_subsystem.setSpeciesAmount('S',10)
try:
# Simulate with Bioscrape and plot the result
timepoints = np.linspace(0,100,100)
results, _ = tx_tl_subsystem.simulateWithBioscrape(timepoints)
plt.plot(timepoints, results[X_id], linewidth = 3, label = 'S = 10')
tx_tl_subsystem.setSpeciesAmount('S',5)
results, _ = tx_tl_subsystem.simulateWithBioscrape(timepoints)
plt.plot(timepoints, results[X_id], linewidth = 3, label = 'S = 5')
plt.title('Protein X dynamics')
plt.ylabel('[X]')
plt.xlabel('Time')
plt.legend()
plt.show()
except:
print('Simulator not found')
# Viewing the change log for the changes that Sub-SBML made
# print(ss1.changeLog)
# print(ss2.changeLog)
print(tx_tl_subsystem.changeLog)
```
## Signal induction model:
1. $\varnothing \xrightarrow[]{\rho(I)} S$;
\begin{align}
\rho(I) = K_{0} \frac{I^2}{K_{I} + I^2}
\\
\end{align}
Here $S$ is the signal produced on induction by an inducer $I$.
The lumped parameters $K_{0}$ and $K_I$ model effects of cooperative production of the signal by the inducer. This is the same kind of Hill function as the one derived in the enzymatic reaction example.
```
ss3 = createSubsystem('signal_in_mixture.xml')
# Signal subsystem (production of signal molecule)
combined_ss = ss1 + ss2 + ss3
# Alternatively
combined_ss = createNewSubsystem()
combined_ss.combineSubsystems([ss1,ss2,ss3])
# Writing a Subsystem to an SBML file (Export SBML)
combined_ss.writeSBML('txtl_combined.xml')
# Set signal concentration (input) - manually and get ID for protein X
combined_ss.setSpeciesAmount('I',10)
X_id = combined_ss.getSpeciesByName('X').getId()
try:
# Simulate with Bioscrape and plot the result
timepoints = np.linspace(0,100,100)
results, _ = combined_ss.simulateWithBioscrape(timepoints)
plt.plot(timepoints, results[X_id], linewidth = 3, label = 'I = 10')
combined_ss.setSpeciesAmount('I',2)
results, _ = combined_ss.simulateWithBioscrape(timepoints)
    plt.plot(timepoints, results[X_id], linewidth = 3, label = 'I = 2')
plt.title('Protein X dynamics')
plt.ylabel('[X]')
plt.xlabel('Time')
plt.legend()
plt.show()
except:
print('Simulator not found')
combined_ss.changeLog
```
## What does Sub-SBML look for?
1. For compartments: if two compartments have the same `name` and the same `size` attributes => they are combined together.
1. For species: if two species have the same `name` attribute => they are combined together. If initial amount is not the same, the first amount is set. It is easy to set species amounts later.
1. For parameters: if two parameters have the same `name` attribute **and** the same `value` => they are combined together.
1. For reactions: if two reactions have the same `name` **and** the same reaction string (reactants -> products) => they are combined together.
1. Other SBML components are also merged.
# Utility functions for Subsystems
1. Set `verbose` keyword argument to `True` to get a list of detailed warning messages that describe the changes being made to the models. Helpful in debugging and creating clean models when combining multiple models.
1. Use `renameSName` method for a `Subsystem` to rename any species' names throughout a model and `renameSIdRefs` to rename identifiers.
1. Use `createBasicSubsystem()` function to get a basic "empty" subsystem model.
1. Use `getSpeciesByName` to get all species with a given name in a Subsystem model.
1. use `shareSubsystems` method similar to `combineSubsystems` method if you are only interested in getting a model with shared resource species combined together.
1. Set `combineNames` keyword argument to `False` in the `combineSubsystems` method to combine the Subsystem objects while treating elements with the same `name` as distinct. A short sketch combining a few of these utilities follows this list.
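The sketch below reuses only calls and file names that appear elsewhere in this notebook (the earlier TX-TL example):
```
from subsbml.Subsystem import createSubsystem, createNewSubsystem

ss_tx = createSubsystem('transcription_SBML_model.xml')
ss_tl = createSubsystem('translation_SBML_model.xml')

# Make the transcript names agree before combining, and ask for a verbose change log
ss_tx.renameSName('mRNA_T', 'T')
combined = createNewSubsystem()
combined.combineSubsystems([ss_tx, ss_tl], verbose=True)

# Same combination, but same-named elements are treated as distinct
separate = createNewSubsystem()
separate.combineSubsystems([ss_tx, ss_tl], combineNames=False)
```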
# Modeling transport across membranes

## System 1 : TX-TL with IPTG reservoir and no membrane
```
from subsbml.System import System, combineSystems
cell_1 = System('cell_1')
ss1 = createSubsystem('txtl_ss.xml')
ss1.renameSName('S', 'IPTG')
ss2 = createSubsystem('IPTG_reservoir.xml')
IPTG_external_conc = ss2.getSpeciesByName('IPTG').getInitialConcentration()
cell_1.setInternal([ss1])
cell_1.setExternal([ss2])
# cell_1.setMembrane() # Membrane-less system
ss1.setSpeciesAmount('IPTG', IPTG_external_conc)
cell_1_model = cell_1.getModel() # Get a Subsystem object that represents the combined model for cell_1
cell_1_model.writeSBML('cell_1_model.xml')
```
## System 2 : TX-TL with IPTG reservoir and a simple membrane
### Membrane : IPTG external and internal diffusion in a one step reversible reaction
```
from subsbml import System, createSubsystem, combineSystems, createNewSubsystem
ss1 = createSubsystem('txtl_ss.xml')
ss1.renameSName('S','IPTG')
ss2 = createSubsystem('IPTG_reservoir.xml')
# Create a simple IPTG membrane where IPTG goes in an out of the membrane via a reversible reaction
mb2 = createSubsystem('membrane_IPTG.xml', membrane = True)
# cell_2 = System('cell_2',ListOfInternalSubsystems = [ss1],
# ListOfExternalSubsystems = [ss2],
# ListOfMembraneSubsystems = [mb2])
cell_2 = System('cell_2')
cell_2.setInternal(ss1)
cell_2.setExternal(ss2)
cell_2.setMembrane(mb2)
cell_2_model = cell_2.getModel()
cell_2_model.setSpeciesAmount('IPTG', 1e4, compartment = 'cell_2_external')
cell_2_model.writeSBML('cell_2_model.xml')
```
## System 3 : TX-TL with IPTG reservoir and a detailed membrane diffusion
### Membrane : External IPTG binds to a transport protein and forms a complex. This complex causes the diffusion of IPTG into the interior of the cell.
```
# Create a more detailed IPTG membrane where IPTG binds to an intermediate transporter protein, forms a complex
# then transports out of the cell system to the external environment
mb3 = createSubsystem('membrane_IPTG_detailed.xml', membrane = True)
cell_3 = System('cell_3',ListOfInternalSubsystems = [ss1],
ListOfExternalSubsystems = [ss2],
ListOfMembraneSubsystems = [mb3])
cell_3_model = cell_3.getModel()
cell_3_model.setSpeciesAmount('IPTG', 1e4, compartment = 'cell_3_external')
cell_3_model.writeSBML('cell_3_model.xml')
combined_model = combineSystems([cell_1, cell_2, cell_3])
try:
import numpy as np
import matplotlib.pyplot as plt
timepoints = np.linspace(0,2,100)
results_1, _ = cell_1_model.simulateWithBioscrape(timepoints)
results_2, _ = cell_2_model.simulateWithBioscrape(timepoints)
results_3, _ = cell_3_model.simulateWithBioscrape(timepoints)
X_id1 = cell_1_model.getSpeciesByName('X').getId()
X_id2 = cell_2_model.getSpeciesByName('X', compartment = 'cell_2_internal').getId()
X_id3 = cell_3_model.getSpeciesByName('X', compartment = 'cell_3_internal').getId()
plt.plot(timepoints, results_1[X_id1], linewidth = 3, label = 'No membrane')
plt.plot(timepoints, results_2[X_id2], linewidth = 3, label = 'Simple membrane')
plt.plot(timepoints, results_3[X_id3], linewidth = 3, label = 'Advanced membrane')
plt.xlabel('Time')
plt.ylabel('[X]')
plt.legend()
plt.show()
timepoints = np.linspace(0,200,100)
results_1, _ = cell_1_model.simulateWithBioscrape(timepoints)
results_2, _ = cell_2_model.simulateWithBioscrape(timepoints)
results_3, _ = cell_3_model.simulateWithBioscrape(timepoints)
X_id1 = cell_1_model.getSpeciesByName('X').getId()
X_id2 = cell_2_model.getSpeciesByName('X', compartment = 'cell_2_internal').getId()
X_id3 = cell_3_model.getSpeciesByName('X', compartment = 'cell_3_internal').getId()
plt.plot(timepoints, results_1[X_id1], linewidth = 3, label = 'No membrane')
plt.plot(timepoints, results_2[X_id2], linewidth = 3, label = 'Simple membrane')
plt.plot(timepoints, results_3[X_id3], linewidth = 3, label = 'Advanced membrane')
plt.xlabel('Time')
plt.ylabel('[X]')
plt.legend()
plt.show()
except:
print('Simulator not found')
```
# Additional Sub-SBML Tools:
* Create SBML models directly using `SimpleModel` class
* Simulate directly using `bioscrape` or `libRoadRunner` with various simulation options
* Various utility functions to edit SBML models:
1. Change species names/identifiers throughout an SBML model.
1. Edit parameter values or species initial conditions easily (directly in an SBML model).
* `combineSystems` function can be used to combine multiple `System` objects together as shown in the previous cell. Also, a special use case interaction modeling function is available : `connectSubsystems`. Refer to the tutorial_interconnetion.ipynb notebook in the tutorials directory for more information about this.
# Things to Try:
1. Compartmentalize your own SBML model - generate more than 1 model each with a different compartment names. Using tools in this notebook, try to combine your models together and regenerate the expected simulation.
1. Implement a diffusion model and use it as a membrane model for a `System` of your choice.
1. Implement an even more complicated diffusion model for the above example and run the simulation.
1. **The package has not been tested extensively. So, it would be really great if you could raise [issues](https://github.com/BuildACell/subsbml/issues) on Github if you face any errors with your models. Also, feel free to send a message on Slack channel or DM.**
| github_jupyter |
# Examples of usage of Gate Angle Placeholder
The word "Placeholder" is used in Qubiter (we are in good company, Tensorflow uses this word in the same way) to mean a variable for which we delay/postpone assigning a numerical value (evaluating it) until a later time. In the case of Qubiter, it is useful to define gates with placeholders standing for angles. One can postpone evaluating those placeholders until one is ready to call the circuit simulator, and then pass the values of the placeholders as an argument to the simulator’s constructor. Placeholders of this type can be useful, for example, with quantum neural nets (QNNs). In some QNN algorithms, the circuit gate structure is fixed but the angles of the gates are varied many times, gradually, trying to lower a cost function each time.
> In Qubiter, legal variable names must be of form `#3` or `-#3` or `#3*.5` or
`-#3*.5` where 3 can be replaced by any non-negative int, and .5 can
be replaced by anything that can be an argument of float() without
throwing an exception. In this example, the 3 that follows the hash
character is called the variable number
>NEW! (functional placeholder variables)
Now legal variable names can ALSO be of the form `my_fun#1#2` or
`-my_fun#1#2`, where
* the 1 and 2 can be replaced by any non-negative integers and there
might be any number > 0 of hash variables. Thus, there need not
always be precisely 2 hash variables as in the example.
* `my_fun` can be replaced by the name of any function with one or
more input floats (2 inputs in the example), as long as the first
character of the function's name is a lower case letter.
>The strings `my_fun#1#2` or `-my_fun#1#2` indicate that one wants to
use for the angle being replaced, the values of `my_fun(#1, #2)` or
`-my_fun(#1, #2)`, respectively, where the inputs #1 and #2 are
floats standing for radians and the output is also a float standing
for radians.
```
import os
import sys
print(os.getcwd())
os.chdir('../../')
print(os.getcwd())
sys.path.insert(0,os.getcwd())
```
We begin by writing a simple circuit with 4 qubits. As usual, the following code will
write an English and a Picture file in the `io_folder` directory. Note that some
angles have been entered into the write() Python functions as legal
variable names instead of floats. In the English file, you will see those legal
names where the numerical values of those angles would have been.
```
from qubiter.SEO_writer import *
from qubiter.SEO_reader import *
from qubiter.EchoingSEO_reader import *
from qubiter.SEO_simulator import *
num_bits = 4
file_prefix = 'placeholder_test'
emb = CktEmbedder(num_bits, num_bits)
wr = SEO_writer(file_prefix, emb)
wr.write_Rx(2, rads=np.pi/7)
wr.write_Rx(1, rads='#2*.5')
wr.write_Rx(1, rads='my_fun1#2')
wr.write_Rn(3, rads_list=['#1', '-#1*3', '#3'])
wr.write_Rx(1, rads='-my_fun2#2#1')
wr.write_cnot(2, 3)
wr.close_files()
```
The following 2 files were just written:
1. <a href='../io_folder/placeholder_test_4_eng.txt'>../io_folder/placeholder_test_4_eng.txt</a>
2. <a href='../io_folder/placeholder_test_4_ZLpic.txt'>../io_folder/placeholder_test_4_ZLpic.txt</a>
Simply by creating an object of the class SEO_reader with the flag `write_log` set equal to True, you can create a log file which contains
* a list of distinct variable numbers
* a list of distinct function names
encountered in the English file
```
rdr = SEO_reader(file_prefix, num_bits, write_log=True)
```
The following log file was just written:
<a href='../io_folder/placeholder_test_4_log.txt'>../io_folder/placeholder_test_4_log.txt</a>
Next, let us create two functions that will be used for the functional placeholders
```
def my_fun1(x):
return x*.5
def my_fun2(x, y):
return x + y
```
**Partial Substitution**
This creates new files
with `#1=30`, `#2=60`, `'my_fun1'->my_fun1`,
but `#3` and `'my_fun2'` still undecided
```
vman = PlaceholderManager(eval_all_vars=False,
var_num_to_rads={1: np.pi/6, 2: np.pi/3},
fun_name_to_fun={'my_fun1': my_fun1})
wr = SEO_writer(file_prefix + '_eval01', emb)
EchoingSEO_reader(file_prefix, num_bits, wr,
vars_manager=vman)
```
The following 2 files were just written:
1. <a href='../io_folder/placeholder_test_eval01_4_eng.txt'>../io_folder/placeholder_test_eval01_4_eng.txt</a>
2. <a href='../io_folder/placeholder_test_eval01_4_ZLpic.txt'>../io_folder/placeholder_test_eval01_4_ZLpic.txt</a>
The following code runs the simulator after substituting
`#1=30`, `#2=60`, `#3=90`, `'my_fun1'->my_fun1`, `'my_fun2'->my_fun2`
```
vman = PlaceholderManager(
var_num_to_rads={1: np.pi/6, 2: np.pi/3, 3: np.pi/2},
fun_name_to_fun={'my_fun1': my_fun1, 'my_fun2': my_fun2}
)
sim = SEO_simulator(file_prefix, num_bits, verbose=False,
vars_manager=vman)
StateVec.describe_st_vec_dict(sim.cur_st_vec_dict)
```
| github_jupyter |
# The art of using pipelines
Pipelines are a natural way to think about a machine learning system. Indeed with some practice a data scientist can visualise data "flowing" through a series of steps. The input is typically some raw data which has to be processed in some manner. The goal is to represent the data in such a way that it can be ingested by a machine learning algorithm. Along the way some steps will extract features, while others will normalize the data and remove undesirable elements. Pipelines are simple, and yet they are a powerful way of designing sophisticated machine learning systems.
Both [scikit-learn](https://stackoverflow.com/questions/33091376/python-what-is-exactly-sklearn-pipeline-pipeline) and [pandas](https://tomaugspurger.github.io/method-chaining) make it possible to use pipelines. However it's quite rare to see pipelines being used in practice (at least on Kaggle). Sometimes you get to see people using scikit-learn's `pipeline` module, however the `pipe` method from `pandas` is sadly underappreciated. A big reason why pipelines are not given much love is that it's easier to think of batch learning in terms of a script or a notebook. Indeed many people doing data science seem to prefer a procedural style to a declarative style. Moreover in practice pipelines can be a bit rigid if one wishes to do non-orthodox operations.
Although pipelines may be a bit of an odd fit for batch learning, they make complete sense when they are used for online learning. Indeed the UNIX philosophy has advocated the use of pipelines for data processing for many decades. If you can visualise data as a stream of observations then using pipelines should make a lot of sense to you. We'll attempt to convince you by writing a machine learning algorithm in a procedural way and then converting it to a declarative pipeline in small steps. Hopefully by the end you'll be convinced, or not!
In this notebook we'll manipulate data from the [Kaggle Recruit Restaurants Visitor Forecasting competition](https://www.kaggle.com/c/recruit-restaurant-visitor-forecasting). The data is directly available through `river`'s `datasets` module.
```
from pprint import pprint
from river import datasets
for x, y in datasets.Restaurants():
pprint(x)
pprint(y)
break
```
We'll start by building and running a model using a procedural coding style. The performance of the model doesn't matter, we're simply interested in the design of the model.
```
from river import feature_extraction
from river import linear_model
from river import metrics
from river import preprocessing
from river import stats
means = (
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(7)),
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(14)),
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(21))
)
scaler = preprocessing.StandardScaler()
lin_reg = linear_model.LinearRegression()
metric = metrics.MAE()
for x, y in datasets.Restaurants():
# Derive date features
x['weekday'] = x['date'].weekday()
x['is_weekend'] = x['date'].weekday() in (5, 6)
# Process the rolling means of the target
for mean in means:
x = {**x, **mean.transform_one(x)}
mean.learn_one(x, y)
# Remove the key/value pairs that aren't features
for key in ['store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude']:
x.pop(key)
# Rescale the data
x = scaler.learn_one(x).transform_one(x)
# Fit the linear regression
y_pred = lin_reg.predict_one(x)
lin_reg.learn_one(x, y)
# Update the metric using the out-of-fold prediction
metric.update(y, y_pred)
print(metric)
```
We're not using many features. We can print the last `x` to get an idea of the features (don't forget they've been scaled!)
```
pprint(x)
```
The above chunk of code is quite explicit but it's a bit verbose. The whole point of libraries such as `river` is to make life easier for users. Moreover there's too much space for users to mess up the order in which things are done, which increases the chance of there being target leakage. We'll now rewrite our model in a declarative fashion using a pipeline *à la sklearn*.
```
from river import compose
def get_date_features(x):
weekday = x['date'].weekday()
return {'weekday': weekday, 'is_weekend': weekday in (5, 6)}
model = compose.Pipeline(
('features', compose.TransformerUnion(
('date_features', compose.FuncTransformer(get_date_features)),
('last_7_mean', feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(7))),
('last_14_mean', feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(14))),
('last_21_mean', feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(21)))
)),
('drop_non_features', compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude')),
('scale', preprocessing.StandardScaler()),
('lin_reg', linear_model.LinearRegression())
)
metric = metrics.MAE()
for x, y in datasets.Restaurants():
# Make a prediction without using the target
y_pred = model.predict_one(x)
# Update the model using the target
model.learn_one(x, y)
# Update the metric using the out-of-fold prediction
metric.update(y, y_pred)
print(metric)
```
We use a `Pipeline` to arrange each step in a sequential order. A `TransformerUnion` is used to merge multiple feature extractors into a single transformer. The `for` loop is now much shorter and is thus easier to grok: we get the out-of-fold prediction, we fit the model, and finally we update the metric. This way of evaluating a model is typical of online learning, and so we wrapped it inside a function called `progressive_val_score`, which is part of the `evaluate` module. We can use it to replace the `for` loop.
```
from river import evaluate
model = compose.Pipeline(
('features', compose.TransformerUnion(
('date_features', compose.FuncTransformer(get_date_features)),
('last_7_mean', feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(7))),
('last_14_mean', feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(14))),
('last_21_mean', feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(21)))
)),
('drop_non_features', compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude')),
('scale', preprocessing.StandardScaler()),
('lin_reg', linear_model.LinearRegression())
)
evaluate.progressive_val_score(dataset=datasets.Restaurants(), model=model, metric=metrics.MAE())
```
Notice that you couldn't have used the `progressive_val_score` method if you wrote the model in a procedural manner.
Our code is getting shorter, but it's still a bit difficult on the eyes. Indeed there is a lot of boilerplate code associated with pipelines that can get tedious to write. However `river` has some special tricks up its sleeve to save you from a lot of pain.
The first trick is that the name of each step in the pipeline can be omitted. If no name is given for a step then `river` automatically infers one.
```
model = compose.Pipeline(
compose.TransformerUnion(
compose.FuncTransformer(get_date_features),
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(7)),
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(14)),
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(21))
),
compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude'),
preprocessing.StandardScaler(),
linear_model.LinearRegression()
)
evaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())
```
Under the hood a `Pipeline` inherits from `collections.OrderedDict`. Indeed this makes sense because if you think about it a `Pipeline` is simply a sequence of steps where each step has a name. The reason we mention this is because it means you can manipulate a `Pipeline` the same way you would manipulate an ordinary `dict`. For instance we can print the name of each step by iterating over the pipeline's `steps`.
```
for name in model.steps:
print(name)
```
The first step is a `TransformerUnion` and its string representation contains the string representation of each of its elements. Not having to write names saves some time and space and is certainly less tedious.
The next trick is that we can use mathematical operators to compose our pipeline. For example we can use the `+` operator to merge `Transformer`s into a `TransformerUnion`.
```
model = compose.Pipeline(
compose.FuncTransformer(get_date_features) + \
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(7)) + \
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(14)) + \
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(21)),
compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude'),
preprocessing.StandardScaler(),
linear_model.LinearRegression()
)
evaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())
```
Likewise we can use the `|` operator to assemble steps into a `Pipeline`.
```
model = (
compose.FuncTransformer(get_date_features) +
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(7)) +
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(14)) +
feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(21))
)
to_discard = ['store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude']
model = model | compose.Discard(*to_discard) | preprocessing.StandardScaler()
model |= linear_model.LinearRegression()
evaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())
```
Hopefully you'll agree that this is a powerful way to express machine learning pipelines. For some people this should be quite reminiscent of the UNIX pipe operator. One final trick we want to mention is that functions are automatically wrapped with a `FuncTransformer`, which can be quite handy.
```
model = get_date_features
for n in [7, 14, 21]:
model += feature_extraction.TargetAgg(by='store_id', how=stats.RollingMean(n))
model |= compose.Discard(*to_discard)
model |= preprocessing.StandardScaler()
model |= linear_model.LinearRegression()
evaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())
```
Naturally some may prefer the procedural style we first used because they find it easier to work with. It all depends on your style and you should use what you feel comfortable with. However we encourage you to use operators because we believe that this will increase the readability of your code, which is very important. To each their own!
Before finishing we can take an interactive look at our pipeline.
```
model
```
| github_jupyter |
# Photometric Plugin
For optical photometry, we provide the **PhotometryLike** plugin that handles forward folding of a spectral model through filter curves. Let's have a look at the available procedures.
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from threeML import *
# we will need XPSEC models for extinction
from astromodels.xspec import *
# The filter library takes a while to load so you must import it explicitly..
from threeML.plugins.photometry.filter_library import threeML_filter_library
```
## Setup
We use [speclite](http://speclite.readthedocs.io/en/latest/) to handle optical filters.
Therefore, you can easily build your own custom filters, use the built in speclite filters, or use the 3ML filter library that we have built thanks to [Spanish Virtual Observatory](http://svo.cab.inta-csic.es/main/index.php).
**If you use these filters, please be sure to cite the proper sources!**
### Simple example of building a filter
Let's say we have our own 1-m telescope with a Johnson filter and we happen to record the data. We also have simultaneous data at other wavelengths and we want to compare. Let's setup the optical plugin (we'll ignore the other data for now).
```
import speclite.filters as spec_filters
my_backyard_telescope_filter = spec_filters.load_filter('bessell-r')
# NOTE:
my_backyard_telescope_filter.name
```
NOTE: the filter name is 'bessell-R'. The plugin will look for the name *after* the **'-'**, i.e. 'R'.
Now let's build a 3ML plugin via **PhotometryLike**.
Our data are entered as keywords, with the name of the filter as the keyword and the data as a (magnitude, error) tuple, i.e. R=(mag, mag_err):
```
my_backyard_telescope = PhotometryLike('backyard_astronomy',
filters=my_backyard_telescope_filter, # the filter
R=(20,.1) ) # the magnitude and error
my_backyard_telescope.display_filters()
```
## 3ML filter library
Explore the filter library. If you cannot find what you need, it is simple to add your own.
```
threeML_filter_library.SLOAN
spec_filters.plot_filters(threeML_filter_library.SLOAN.SDSS)
spec_filters.plot_filters(threeML_filter_library.Herschel.SPIRE)
spec_filters.plot_filters(threeML_filter_library.Keck.NIRC2)
```
## Build your own filters
Following the example from speclite, we can build our own filters and add them:
```
fangs_g = spec_filters.FilterResponse(
wavelength = [3800, 4500, 5200] * u.Angstrom,
response = [0, 0.5, 0], meta=dict(group_name='fangs', band_name='g'))
fangs_r = spec_filters.FilterResponse(
wavelength = [4800, 5500, 6200] * u.Angstrom,
response = [0, 0.5, 0], meta=dict(group_name='fangs', band_name='r'))
fangs = spec_filters.load_filters('fangs-g', 'fangs-r')
fangslike = PhotometryLike('fangs',filters=fangs,g=(20,.1),r=(18,.1))
fangslike.display_filters()
```
## GROND Example
Now we will look at GROND. We get the filter from the 3ML filter library.
(Just play with tab completion to see what is available!)
```
grond = PhotometryLike('GROND',
filters=threeML_filter_library.ESO.GROND,
#g=(21.5.93,.23), # we exclude these filters
#r=(22.,0.12),
i=(21.8,.01),
z=(21.2,.01),
J=(19.6,.01),
H=(18.6,.01),
K=(18.,.01))
grond.display_filters()
```
### Model specification
Here we use XSPEC's dust extinction models for the milky way and the host
```
spec = Powerlaw() * XS_zdust() * XS_zdust()
data_list = DataList(grond)
model = Model(PointSource('grb',0,0,spectral_shape=spec))
spec.piv_1 = 1E-2
spec.index_1.fix=False
spec.redshift_2 = 0.347
spec.redshift_2.fix = True
spec.e_bmv_2 = 5./2.93
spec.e_bmv_2.fix = True
spec.rv_2 = 2.93
spec.rv_2.fix = True
spec.method_2 = 3
spec.method_2.fix=True
spec.e_bmv_3 = .002/3.08
spec.e_bmv_3.fix = True
spec.rv_3= 3.08
spec.rv_3.fix=True
spec.redshift_3 = 0
spec.redshift_3.fix=True
spec.method_3 = 1
spec.method_3.fix=True
jl = JointLikelihood(model,data_list)
```
We compute $m_{\rm AB}$ from astromodels photon fluxes. This is done by convolving the differential flux over the filter response:
$ F[R,f_\lambda] \equiv \int_0^\infty \frac{dg}{d\lambda}(\lambda)R(\lambda) \omega(\lambda) d\lambda$
where we have converted the astromodels functions to wavelength properly.
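As a quick sanity check of this folding step, one can fold a simple spectrum through a speclite filter directly; the flat $f_\lambda$ spectrum and its normalisation below are made up for illustration:
```
import numpy as np
import astropy.units as u
import speclite.filters as spec_filters

rband = spec_filters.load_filter('bessell-R')
wlen = np.arange(3000.0, 11000.0, 1.0) * u.Angstrom
flux = np.full(wlen.shape, 1e-17) * u.erg / (u.cm ** 2 * u.s * u.Angstrom)

# AB magnitude of the flat spectrum folded through the Bessell R response
print(rband.get_ab_magnitude(flux, wlen))
```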
```
_ = jl.fit()
```
We can now look at the fit in magnitude space or model space as with any plugin.
```
_=display_photometry_model_magnitudes(jl)
_ = plot_point_source_spectra(jl.results,flux_unit='erg/(cm2 s keV)',
xscale='linear',
energy_unit='nm',ene_min=1E3, ene_max=1E5, num_ene=200 )
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Tensorflow Lite Gesture Classification Example Conversion Script
This guide shows how you can go about converting the model trained with TensorFlowJS to TensorFlow Lite FlatBuffers.
Run all steps in-order. At the end, `model.tflite` file will be downloaded.
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/mobile/examples/gesture_classification/ml/tensorflowjs_to_tflite_colab_notebook.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/examples/blob/master/mobile/examples/gesture_classification/ml/tensorflowjs_to_tflite_colab_notebook.ipynb">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub</a>
</td>
</table>
**Install Dependencies**
```
!pip3 install tensorflow==1.14.0 keras==2.2.4 tensorflowjs==0.6.4 --force-reinstall
import traceback
import logging
import tensorflow.compat.v1 as tf
import keras.backend as K
import os
from google.colab import files
from keras import Model, Input
from keras.applications import MobileNet
from keras.engine.saving import load_model
from tensorflowjs.converters import load_keras_model
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
```
***Cleanup any existing models if necessary***
```
!rm -rf *.h5 *.tflite *.json *.bin
```
**Upload your Tensorflow.js Artifacts Here**
i.e., The weights manifest **model.json** and the binary weights file **model-weights.bin**
```
files.upload()
```
**Export Configuration**
```
#@title Export Configuration
# TensorFlow.js arguments
config_json = "model.json" #@param {type:"string"}
weights_path_prefix = None #@param {type:"raw"}
model_tflite = "model.tflite" #@param {type:"string"}
```
**Model Converter**
The following class converts a TensorFlow.js model to a TFLite FlatBuffer
```
class ModelConverter:
"""
Creates a ModelConverter class from a TensorFlow.js model file.
Args:
:param config_json_path: Full filepath of weights manifest file containing the model architecture.
:param weights_path_prefix: Full filepath to the directory in which the weights binaries exist.
:param tflite_model_file: Name of the TFLite FlatBuffer file to be exported.
:return:
ModelConverter class.
"""
def __init__(self,
config_json_path,
weights_path_prefix,
tflite_model_file
):
self.config_json_path = config_json_path
self.weights_path_prefix = weights_path_prefix
self.tflite_model_file = tflite_model_file
self.keras_model_file = 'merged.h5'
# MobileNet Options
self.input_node_name = 'the_input'
self.image_size = 224
self.alpha = 0.25
self.depth_multiplier = 1
self._input_shape = (1, self.image_size, self.image_size, 3)
self.depthwise_conv_layer = 'conv_pw_13_relu'
def convert(self):
self.save_keras_model()
self._deserialize_tflite_from_keras()
logger.info('The TFLite model has been generated')
self._purge()
def save_keras_model(self):
top_model = load_keras_model(self.config_json_path, self.weights_path_prefix,
weights_data_buffers=None,
load_weights=True,
use_unique_name_scope=True)
base_model = self.get_base_model()
merged_model = self.merge(base_model, top_model)
merged_model.save(self.keras_model_file)
logger.info("The merged Keras HDF5 model has been saved as {}".format(self.keras_model_file))
def merge(self, base_model, top_model):
"""
Merges base model with the classification block
:return: Returns the merged Keras model
"""
logger.info("Initializing model...")
layer = base_model.get_layer(self.depthwise_conv_layer)
model = Model(inputs=base_model.input, outputs=top_model(layer.output))
logger.info("Model created.")
return model
def get_base_model(self):
"""
Builds MobileNet with the default parameters
:return: Returns the base MobileNet model
"""
input_tensor = Input(shape=self._input_shape[1:], name=self.input_node_name)
base_model = MobileNet(input_shape=self._input_shape[1:],
alpha=self.alpha,
depth_multiplier=self.depth_multiplier,
input_tensor=input_tensor,
include_top=False)
return base_model
def _deserialize_tflite_from_keras(self):
converter = tf.lite.TFLiteConverter.from_keras_model_file(self.keras_model_file)
tflite_model = converter.convert()
with open(self.tflite_model_file, "wb") as file:
file.write(tflite_model)
def _purge(self):
logger.info('Cleaning up Keras model')
os.remove(self.keras_model_file)
try:
K.clear_session()
converter = ModelConverter(config_json,
weights_path_prefix,
model_tflite)
converter.convert()
except ValueError as e:
print(traceback.format_exc())
print("Error occurred while converting")
files.download(model_tflite)
```
| github_jupyter |
```
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from tqdm import tqdm
%matplotlib inline
from torch.utils.data import Dataset, DataLoader
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from torch.nn import functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
m = 50 # 5, 50, 100, 500, 1000, 2000
desired_num = 200
tr_i = 0
tr_j = int(desired_num/2)
tr_k = desired_num
tr_i, tr_j, tr_k
```
# Generate dataset
```
np.random.seed(12)
y = np.random.randint(0,10,5000)
idx= []
for i in range(10):
print(i,sum(y==i))
idx.append(y==i)
x = np.zeros((5000,2))
np.random.seed(12)
x[idx[0],:] = np.random.multivariate_normal(mean = [5,5],cov=[[0.1,0],[0,0.1]],size=sum(idx[0]))
x[idx[1],:] = np.random.multivariate_normal(mean = [-6,7],cov=[[0.1,0],[0,0.1]],size=sum(idx[1]))
x[idx[2],:] = np.random.multivariate_normal(mean = [-5,-4],cov=[[0.1,0],[0,0.1]],size=sum(idx[2]))
x[idx[3],:] = np.random.multivariate_normal(mean = [-1,0],cov=[[0.1,0],[0,0.1]],size=sum(idx[3]))
x[idx[4],:] = np.random.multivariate_normal(mean = [0,2],cov=[[0.1,0],[0,0.1]],size=sum(idx[4]))
x[idx[5],:] = np.random.multivariate_normal(mean = [1,0],cov=[[0.1,0],[0,0.1]],size=sum(idx[5]))
x[idx[6],:] = np.random.multivariate_normal(mean = [0,-1],cov=[[0.1,0],[0,0.1]],size=sum(idx[6]))
x[idx[7],:] = np.random.multivariate_normal(mean = [0,0],cov=[[0.1,0],[0,0.1]],size=sum(idx[7]))
x[idx[8],:] = np.random.multivariate_normal(mean = [-0.5,-0.5],cov=[[0.1,0],[0,0.1]],size=sum(idx[8]))
x[idx[9],:] = np.random.multivariate_normal(mean = [0.4,0.2],cov=[[0.1,0],[0,0.1]],size=sum(idx[9]))
x[idx[0]][0], x[idx[5]][5]
for i in range(10):
plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
bg_idx = [ np.where(idx[3] == True)[0],
np.where(idx[4] == True)[0],
np.where(idx[5] == True)[0],
np.where(idx[6] == True)[0],
np.where(idx[7] == True)[0],
np.where(idx[8] == True)[0],
np.where(idx[9] == True)[0]]
bg_idx = np.concatenate(bg_idx, axis = 0)
bg_idx.shape
np.unique(bg_idx).shape
x = x - np.mean(x[bg_idx], axis = 0, keepdims = True)
np.mean(x[bg_idx], axis = 0, keepdims = True), np.mean(x, axis = 0, keepdims = True)
x = x/np.std(x[bg_idx], axis = 0, keepdims = True)
np.std(x[bg_idx], axis = 0, keepdims = True), np.std(x, axis = 0, keepdims = True)
for i in range(10):
plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
foreground_classes = {'class_0','class_1', 'class_2'}
background_classes = {'class_3','class_4', 'class_5', 'class_6','class_7', 'class_8', 'class_9'}
fg_class = np.random.randint(0,3)
fg_idx = np.random.randint(0,m)
a = []
for i in range(m):
if i == fg_idx:
b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
a.append(x[b])
print("foreground "+str(fg_class)+" present at " + str(fg_idx))
else:
bg_class = np.random.randint(3,10)
b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
a.append(x[b])
print("background "+str(bg_class)+" present at " + str(i))
a = np.concatenate(a,axis=0)
print(a.shape)
print(fg_class , fg_idx)
np.reshape(a,(2*m,1))
mosaic_list_of_images =[]
mosaic_label = []
fore_idx=[]
for j in range(desired_num):
np.random.seed(j)
fg_class = np.random.randint(0,3)
fg_idx = np.random.randint(0,m)
a = []
for i in range(m):
if i == fg_idx:
b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
a.append(x[b])
# print("foreground "+str(fg_class)+" present at " + str(fg_idx))
else:
bg_class = np.random.randint(3,10)
b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
a.append(x[b])
# print("background "+str(bg_class)+" present at " + str(i))
a = np.concatenate(a,axis=0)
mosaic_list_of_images.append(np.reshape(a,(2*m,1)))
mosaic_label.append(fg_class)
fore_idx.append(fg_idx)
mosaic_list_of_images = np.concatenate(mosaic_list_of_images,axis=1).T
mosaic_list_of_images.shape
mosaic_list_of_images.shape, mosaic_list_of_images[0]
for j in range(m):
print(mosaic_list_of_images[0][2*j:2*j+2])
def create_avg_image_from_mosaic_dataset(mosaic_dataset,labels,foreground_index,dataset_number, m):
"""
mosaic_dataset : mosaic_dataset contains 9 images 32 x 32 each as 1 data point
labels : mosaic_dataset labels
foreground_index : contains list of indexes where foreground image is present so that using this we can take weighted average
dataset_number : will help us to tell what ratio of foreground image to be taken. for eg: if it is "j" then fg_image_ratio = j/9 , bg_image_ratio = (9-j)/8*9
"""
avg_image_dataset = []
cnt = 0
counter = np.zeros(m) #np.array([0,0,0,0,0,0,0,0,0])
for i in range(len(mosaic_dataset)):
img = torch.zeros([2], dtype=torch.float64)
np.random.seed(int(dataset_number*10000 + i))
give_pref = foreground_index[i] #np.random.randint(0,9)
# print("outside", give_pref,foreground_index[i])
for j in range(m):
if j == give_pref:
img = img + mosaic_dataset[i][2*j:2*j+2]*dataset_number/m #2 is data dim
else :
img = img + mosaic_dataset[i][2*j:2*j+2]*(m-dataset_number)/((m-1)*m)
if give_pref == foreground_index[i] :
# print("equal are", give_pref,foreground_index[i])
cnt += 1
counter[give_pref] += 1
else :
counter[give_pref] += 1
avg_image_dataset.append(img)
print("number of correct averaging happened for dataset "+str(dataset_number)+" is "+str(cnt))
print("the averaging are done as ", counter)
return avg_image_dataset , labels , foreground_index
avg_image_dataset_1 , labels_1, fg_index_1 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[0:tr_j], mosaic_label[0:tr_j], fore_idx[0:tr_j] , 1, m)
test_dataset , labels , fg_index = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[tr_j : tr_k], mosaic_label[tr_j : tr_k], fore_idx[tr_j : tr_k] , m, m)
avg_image_dataset_1 = torch.stack(avg_image_dataset_1, axis = 0)
# avg_image_dataset_1 = (avg - torch.mean(avg, keepdims= True, axis = 0)) / torch.std(avg, keepdims= True, axis = 0)
# print(torch.mean(avg_image_dataset_1, keepdims= True, axis = 0))
# print(torch.std(avg_image_dataset_1, keepdims= True, axis = 0))
print("=="*40)
test_dataset = torch.stack(test_dataset, axis = 0)
# test_dataset = (avg - torch.mean(avg, keepdims= True, axis = 0)) / torch.std(avg, keepdims= True, axis = 0)
# print(torch.mean(test_dataset, keepdims= True, axis = 0))
# print(torch.std(test_dataset, keepdims= True, axis = 0))
print("=="*40)
x1 = (avg_image_dataset_1).numpy()
y1 = np.array(labels_1)
plt.scatter(x1[y1==0,0], x1[y1==0,1], label='class 0')
plt.scatter(x1[y1==1,0], x1[y1==1,1], label='class 1')
plt.scatter(x1[y1==2,0], x1[y1==2,1], label='class 2')
plt.legend()
plt.title("dataset4 CIN with alpha = 1/"+str(m))
x1 = (test_dataset).numpy() / m
y1 = np.array(labels)
plt.scatter(x1[y1==0,0], x1[y1==0,1], label='class 0')
plt.scatter(x1[y1==1,0], x1[y1==1,1], label='class 1')
plt.scatter(x1[y1==2,0], x1[y1==2,1], label='class 2')
plt.legend()
plt.title("test dataset4")
test_dataset[0:10]/m
test_dataset = test_dataset/m
test_dataset[0:10]
class MosaicDataset(Dataset):
"""MosaicDataset dataset."""
def __init__(self, mosaic_list_of_images, mosaic_label):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.mosaic = mosaic_list_of_images
self.label = mosaic_label
#self.fore_idx = fore_idx
def __len__(self):
return len(self.label)
def __getitem__(self, idx):
return self.mosaic[idx] , self.label[idx] #, self.fore_idx[idx]
avg_image_dataset_1[0].shape
avg_image_dataset_1[0]
batch = 200
traindata_1 = MosaicDataset(avg_image_dataset_1, labels_1 )
trainloader_1 = DataLoader( traindata_1 , batch_size= batch ,shuffle=True)
testdata_1 = MosaicDataset(avg_image_dataset_1, labels_1 )
testloader_1 = DataLoader( testdata_1 , batch_size= batch ,shuffle=False)
testdata_11 = MosaicDataset(test_dataset, labels )
testloader_11 = DataLoader( testdata_11 , batch_size= batch ,shuffle=False)
class Whatnet(nn.Module):
def __init__(self):
super(Whatnet,self).__init__()
self.linear1 = nn.Linear(2,3)
# self.linear2 = nn.Linear(50,10)
# self.linear3 = nn.Linear(10,3)
torch.nn.init.xavier_normal_(self.linear1.weight)
torch.nn.init.zeros_(self.linear1.bias)
def forward(self,x):
# x = F.relu(self.linear1(x))
# x = F.relu(self.linear2(x))
x = (self.linear1(x))
return x
def calculate_loss(dataloader,model,criter):
model.eval()
r_loss = 0
with torch.no_grad():
for i, data in enumerate(dataloader, 0):
inputs, labels = data
inputs, labels = inputs.to("cuda"),labels.to("cuda")
outputs = model(inputs)
loss = criter(outputs, labels)
r_loss += loss.item()
return r_loss/(i+1)
def test_all(number, testloader,net):
correct = 0
total = 0
out = []
pred = []
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
out.append(labels.cpu().numpy())
outputs= net(images)
_, predicted = torch.max(outputs.data, 1)
pred.append(predicted.cpu().numpy())
total += labels.size(0)
correct += (predicted == labels).sum().item()
pred = np.concatenate(pred, axis = 0)
out = np.concatenate(out, axis = 0)
print("unique out: ", np.unique(out), "unique pred: ", np.unique(pred) )
print("correct: ", correct, "total ", total)
print('Accuracy of the network on the %d test dataset %d: %.2f %%' % (total, number , 100 * correct / total))
def train_all(trainloader, ds_number, testloader_list):
print("--"*40)
print("training on data set ", ds_number)
torch.manual_seed(12)
net = Whatnet().double()
net = net.to("cuda")
criterion_net = nn.CrossEntropyLoss()
optimizer_net = optim.Adam(net.parameters(), lr=0.001 ) #, momentum=0.9)
acti = []
loss_curi = []
epochs = 1000
running_loss = calculate_loss(trainloader,net,criterion_net)
loss_curi.append(running_loss)
print('epoch: [%d ] loss: %.3f' %(0,running_loss))
for epoch in range(epochs): # loop over the dataset multiple times
ep_lossi = []
running_loss = 0.0
net.train()
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
inputs, labels = inputs.to("cuda"),labels.to("cuda")
# zero the parameter gradients
optimizer_net.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion_net(outputs, labels)
# print statistics
running_loss += loss.item()
loss.backward()
optimizer_net.step()
running_loss = calculate_loss(trainloader,net,criterion_net)
if(epoch%200 == 0):
print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
loss_curi.append(running_loss) #loss per epoch
if running_loss<=0.05:
print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
break
print('Finished Training')
correct = 0
total = 0
with torch.no_grad():
for data in trainloader:
images, labels = data
images, labels = images.to("cuda"), labels.to("cuda")
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the %d train images: %.2f %%' % (total, 100 * correct / total))
for i, j in enumerate(testloader_list):
test_all(i+1, j,net)
print("--"*40)
return loss_curi
train_loss_all=[]
testloader_list= [ testloader_1, testloader_11]
train_loss_all.append(train_all(trainloader_1, 1, testloader_list))
%matplotlib inline
for i,j in enumerate(train_loss_all):
plt.plot(j,label ="dataset "+str(i+1))
plt.xlabel("Epochs")
plt.ylabel("Training_loss")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
```
| github_jupyter |
# Introduction to Convolutional Neural Networks (CNNs) in PyTorch
### Representing images digitally
While convolutional neural networks (CNNs) see a wide variety of uses, they were originally designed for images, and CNNs are still most commonly used for vision-related tasks.
For today, we'll primarily be focusing on CNNs for images.
Before we dive into convolutions and neural networks, it's worth prefacing with how images are represented by a computer, as this understanding will inform some of our design choices.
Previously, we saw an example of a digitized MNIST handwritten digit.
Specifically, we represent it as an $H \times W$ table, with the value of each element storing the intensity of the corresponding pixel.
<img src="./Figures/mnist_digital.png" alt="mnist_digital" style="width: 600px;"/>
With a 2D representation as above, we for the most part can only efficiently represent grayscale images.
What if we want color?
There are many schemes for storing color, but one of the most common ones is the [RGB color model](https://en.wikipedia.org/wiki/RGB_color_model).
In such a system, we store 3 tables of pixel intensities (each called a *channel*), one each for the colors red, green, and blue (hence RGB), resulting in an $H \times W \times 3$ tensor.
Pixel values for a particular channel indicate how much of the corresponding color the image has at a particular location.
## Let's load an image and look at different channels:
```
%matplotlib inline
import imageio
import matplotlib.pyplot as plt
# Read the image "./Figures/chapel.jpg" from the disk.
# Hint: use `im = imageio.imread(<Path to the image>)`.
# Print the shape of the tensor
# Display the image
```
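One possible way to fill in the cell above is sketched below; it assumes the chapel image referenced in the comments actually exists at `./Figures/chapel.jpg` alongside this notebook.
```
# Read the image from disk, report its shape, and display it
im = imageio.imread("./Figures/chapel.jpg")
print(im.shape)
plt.imshow(im)
plt.show()
```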
We can see that the image we loaded has height and width of $620 \times 1175$, with 3 channels corresponding to RGB.
We can easily slice out and view individual color channels:
```
# Uncomment the following command to extract the red channel of the above image.
# im_red = im[:,:,0]
# Display the image
# Hint: To display the pixel values for a single channel, we can display the image using the gray-scale colormap
# Repeat the above for the blue channel to visualize features represented in the blue color channel.
```
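A possible completion of the channel-slicing cell, reusing the `im` array loaded above (a sketch, not the only way to do it):
```
# Extract and display the red channel with a gray-scale colormap
im_red = im[:, :, 0]
plt.imshow(im_red, cmap="gray")
plt.show()

# Repeat for the blue channel (index 2 in RGB ordering)
im_blue = im[:, :, 2]
plt.imshow(im_blue, cmap="gray")
plt.show()
```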
While we have so far considered only 3 channel RGB images, there are many settings in which we may consider a different number of channels.
For example, [hyperspectral imaging](https://en.wikipedia.org/wiki/Hyperspectral_imaging) uses a wide range of the electromagnetic spectrum to characterize a scene.
Such modalities may have hundreds of channels or more.
Additionally, we'll soon see that certain intermediate representations in a CNN can be considered images with many channels.
### Convolutions
Convolutional neural networks (CNNs) are a class of neural networks that have convolutional layers.
CNNs are particularly effective for data that have spatial structures and correlations (e.g. images).
We'll focus on CNNs applied to images in this tutorial.
Recall that a multilayer perceptron (MLP) is entirely composed of fully connected layers, which are each a matrix multiply operation (and addition of a bias) followed by a non-linearity (e.g. sigmoid, ReLU).
A convolutional layer is similar, except the matrix multiply operation is replaced with a convolution operation (in practice a cross-correlation).
Note that a CNN need not be entirely composed of convolutional layers; in fact, many popular CNN architectures end in fully connected layers.
As before, since we're building neural networks, let's start by loading PyTorch. We'll find NumPy useful as well, so we'll also import that here.
```
import numpy as np
# PyTorch Imports
##################################################
# #
# ---- YOUR CODE HERE ---- #
# #
##################################################
```
#### Review: Fully connected layer
In a fully connected layer, the input $x \in \mathbb R^{M \times C_{in}}$ is a vector (or, rather a batch of vectors), where $M$ is the minibatch size and $C_{in}$ is the dimensionality of the input.
We first matrix multiply the input $x$ by a weight matrix $W$.
This weight matrix has dimensions $W \in \mathbb R^{C_{in} \times C_{out}}$, where $C_{out}$ is the number of output units.
We then add a bias for each output, which we do by adding $b \in \mathbb{R}^{C_{out}}$.
The output $y \in \mathbb{R}^{M \times C_{out}}$ of the fully connected layer is then:
\begin{align*}
y = \text{ReLU}(x W + b)
\end{align*}
Remember, the values of $W$ and $b$ are variables that we are trying to learn for our model.
Below we have a visualization of what the matrix operation looks like (bias term and activation function omitted).
<img src="./Figures/mnist_matmul.png" width="800"/>
```
# Create a random flat input vector
x_fc = torch.randn(100, 1024)
# Create weight matrix variable
W = torch.randn(1024, 10)/np.sqrt(1024)
# Create bias variable
b = torch.zeros(10, requires_grad=True)
# Use `W` and `b` to apply a fully connected layer.
# Store the output in variable `y`.
# Don't forget to apply the activation function.
##################################################
# ---- YOUR CODE HERE ---- #
##################################################
# Print input/output shape
print("Input shape: {}".format(x_fc.shape))
print("Output shape: {}".format(y.shape))
```
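One way to fill in the blank above, assuming the earlier import cell was completed with `import torch` (and, for later cells, `import torch.nn.functional as F`):
```
# Matrix multiply, add the bias, then apply the ReLU activation
y = torch.relu(torch.matmul(x_fc, W) + b)
```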
#### Convolutional layer
In a convolutional layer, we convolve the input $x$ with a convolutional kernel (aka filter), which we also call $W$, producing output $y$:
\begin{align*}
y = \text{ReLU}(W*x + b)
\end{align*}
In the context of CNNs, the output $y$ is often referred to as feature maps. As with a fully connected layer, the goal is to learn $W$ and $b$ for our model.
Unlike the input of a fully connected layer, which is $x \in \mathbb R^{M\times C_{in}}$, the dimensionality of an image input is 4D: $x \in \mathbb R^{M \times C_{in} \times H_{in} \times W_{in}}$, where $M$ is still the batch size, $C_{in}$ is the number of channels of the input (e.g. 3 for RGB), and $H_{in}$ and $W_{in}$ are the height and width of the image.
The weight parameter $W$ is also different in a convolutional layer.
Unlike the 2-D weight matrix for fully connected layers, the kernel is 4-D with dimensions $W \in \mathbb R^{C_{out} \times C_{in} \times H_K \times W_K }$, where $H_K$ and $W_K$ are the kernel height and width, respectively.
A common choice for $H_K$ and $W_K$ is $H_K = W_K = 3$ or $5$, but this tends to vary depending on the architecture.
Convolving the input with the kernel and adding a bias then gives an output $y \in \mathbb R^{M \times C_{out} \times H_{out} \times W_{out}}$.
If we use "same" padding and a stride of $1$ in our convolution (more on this later), our output will have the same spatial dimensions as the input: $H_{out}=H_{in}$ and $W_{out}=W_{in}$.
If you're having trouble visualizing this operation in 4D, it's easier to think about for a single member of the minibatch, one convolutional kernel at a time.
Consider a stack of $C_{out}$ number of kernels, each of which are 3D ($C_{in} \times H_K \times W_K $).
This 3D volume is then slid across the input (which is also 3D: $C_{in} \times H_{in} \times W_{in}$) in the two spatial dimensions (along $H_{in}$ and $W_{in}$).
The outputs of the multiplication of the kernel and the input at every location creates a single feature map that is $H_{out} \times W_{out}$.
Stacking the feature maps generated by each kernel gives the 3D output $C_{out} \times H_{out} \times W_{out} $.
Repeat the process for all $M$ inputs in the minibatch, and we get a 4D output $M \times C_{out} \times H_{out} \times W_{out}$.
<img src="./Figures/conv_filters.png" alt="Convolutional filters" style="width: 600px;"/>
A few more things to note:
- Notice the ordering of the dimensions of the input (batch, channels in, height, width).
This is commonly referred to as $NCHW$ ordering.
Many other languages and libraries (e.g. MATLAB, TensorFlow, the image example at the beginning of this notebook) instead default to the slightly different $NHWC$ ordering.
PyTorch defaults to $NCHW$, as it is more computationally efficient, especially with CUDA.
- An additional argument for the convolution is the *stride*, which controls how far we slide the convolutional filter as we move it along the input image.
The convolutional operator, from its signal processing roots, by default considers a stride length of 1 in all dimensions, but in some situations we would like to consider strides more than 1 (or even less than 1).
More on this later.
- In the context of signal processing, convolutions usually result in outputs that are larger than the input size, which results from when the kernel "hangs off the edge" of the input on both sides.
This might not always be desirable.
We can control this by controlling the padding of the input.
Typically, we pad the input to ensure the output has the same spatial dimensions as the input (assuming a stride of 1); this makes it easier for us to keep track of the size of our model.
Let's implement this convolution operator in code.
There is a convolution implementation in `torch.nn.functional`, which we use here.
```
# Create a random 4D tensor. Use the NCHW format, where N = 100, C = 3, H = W =32
x_cnn =
# Create convolutional kernel variable (C_out, C_in, H_k, W_k)
W1 =
# Create a bias variable of size C_out
b1 =
# Apply the convolutional layer with relu activation
conv1 =
# Print input/output shape
print("Input shape: {}".format(x_cnn.shape))
print("Convolution output shape: {}".format(conv1.shape))
```
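A possible completion of the cell above; the kernel size of 3 and the scaling of the random weights are illustrative choices, not the only valid ones:
```
# Random NCHW input: batch of 100 RGB images of size 32x32
x_cnn = torch.randn(100, 3, 32, 32)
# 32 kernels, each 3 x 3 x 3 (C_in x H_k x W_k)
W1 = torch.randn(32, 3, 3, 3) / np.sqrt(3 * 3 * 3)
b1 = torch.zeros(32, requires_grad=True)
# "Same" padding with a 3x3 kernel means padding=1
conv1 = F.relu(F.conv2d(x_cnn, W1, bias=b1, stride=1, padding=1))
```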
Just like in an MLP, we can stack multiple of these convolutional layers.
In the *Representing Images Digitally* section, we briefly mentioned considering images with channels more than 3.
Observe that the input to the second layer (i.e. the output of the first layer) can be viewed as an "image" with $C_{out}$ channels.
Instead of each channel representing a color content though, each channel effectively represents how much the original input image activated a particular convolutional kernel.
Given $C_{out}$ kernels that are each $C_{in} \times H_K \times W_K$, this results in $C_{out}$ channels for the output of the convolution.
Note that we need to change the dimensions of the convolutional kernel such that its input channels matches the number of output channels of the previous layer:
```
# Create the second convolutional layer by defining a random `W2` and `b2`
W2 =
b2 =
# Apply 2nd convolutional layer to the output of the first convolutional layer
conv2 =
# Print output shape
print("Second convolution output shape: {}".format(conv2.shape))
```
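A sketch of the second layer; note that the kernel's input channels (32) must match the output channels of `conv1`, and keeping 32 output channels here matches the `32*32*32` flatten used later in this notebook:
```
W2 = torch.randn(32, 32, 3, 3) / np.sqrt(32 * 3 * 3)
b2 = torch.zeros(32, requires_grad=True)
conv2 = F.relu(F.conv2d(conv1, W2, bias=b2, stride=1, padding=1))
```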
In fact, we typically perform these convolution operations many times.
Popular CNN architectures for image analysis today can be 100+ layers.
### Reshaping
You'll commonly find yourself needing to reshape tensors while building CNNs.
The PyTorch function for doing so is `view()`.
Anyone familiar with NumPy will find it very similar to `np.reshape()`.
Importantly, the new dimensions must be chosen so that it is possible to rearrange the input into the shape of the output (i.e. the total number of elements must be the same).
As with NumPy, you can optionally replace one of the dimensions with a `-1`, which tells `torch` to infer the missing dimension.
```
M = torch.zeros(4, 3)
M2 = M.view(1,1,12)
M3 = M.view(2,1,2,3)
M4 = M.view(-1,2,3)
M5 = M.view(-1)
```
To get an idea of why reshaping is needed in a CNN, let's look at a diagram of a simple CNN.
<img src="Figures/mnist_cnn_ex.png" alt="mnist_cnn_ex" style="width: 800px;"/>
First of all, the CNN expects a 4D input, with the dimensions corresponding to `[batch, channel, height, width]`.
Your data may not come in this format, so you may have to reshape it yourself.
```
x_flat = torch.randn(100, 1024)
# Reshape flat input image into a 4D batched image input
# Hint: Use batch=100, height=width=32.
x_reshaped =
# Print input shape
print(x_reshaped.shape)
```
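A possible one-line completion: 100 flat vectors of length 1024 become a batch of 100 single-channel 32x32 images.
```
x_reshaped = x_flat.view(-1, 1, 32, 32)
```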
CNN architectures also commonly contain fully connected layers or a softmax, as we're often interested in classification.
Both of these expect 2D inputs with dimensions `[batch, dim]`, so you have to "flatten" a CNN's 4D output to 2D.
For example, to flatten the convolutional feature maps we created earlier:
```
# Flatten convolutional feature maps into a vector
h_flat = conv2.view(-1, 32*32*32)
# Print output shape
print(h_flat.shape)
```
### Pooling and striding
Almost all CNN architectures incorporate either pooling or striding. This is done for a number of reasons, including:
- Dimensionality reduction: pooling and striding operations reduces computational complexity by shrinking the number of values passed to the next layer.
For example, a 2x2 maxpool reduces the size of the feature maps by a factor of 4.
- Translational invariance: Oftentimes in computer vision, we'd prefer that shifting the input by a few pixels doesn't change the output. Pooling and striding reduces sensitivity to exact pixel locations.
- Increasing receptive field: by summarizing a window with a single value, subsequent convolutional kernels are seeing a wider swath of the original input image. For example, a max pool on some input followed by a 3x3 convolution results in a kernel "seeing" a 6x6 region instead of 3x3.
#### Pooling
The two most common forms of pooling are max pooling and average pooling.
Both reduce values within a window to a single value, on a per-feature-map basis.
Max pooling takes the maximum value of the window as the output value; average pooling takes the mean.
<img src="./Figures/maxpool.png" alt="avg_vs_max" style="width: 800px;"/>
```
# Recreate the values in pooling figure with shape [4,4]
feature_map_fig =
# Convert 2D matrix to a 4D tensor of shape [1,1,4,4].
fmap_fig =
print("Feature map shape pre-pooling: {}".format(fmap_fig.shape))
# Apply max pool to fmap_fig
max_pool_fig =
print("\nMax pool")
print("Shape: {}".format(max_pool_fig.shape))
print(torch.squeeze(max_pool_fig))
# Apply Avgerage pool to fmap_fig
avg_pool_fig =
print("\nAvg pool")
print("Shape: {}".format(avg_pool_fig.shape))
print(torch.squeeze(avg_pool_fig))
```
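One way to complete the cell above; the numbers here are illustrative, since the exact values from the pooling figure are not reproduced in this text:
```
feature_map_fig = torch.tensor([[1., 3., 2., 4.],
                                [5., 6., 7., 8.],
                                [3., 2., 1., 0.],
                                [1., 2., 3., 4.]])
fmap_fig = feature_map_fig.view(1, 1, 4, 4)           # [N, C, H, W]
max_pool_fig = F.max_pool2d(fmap_fig, kernel_size=2)  # max of each 2x2 window
avg_pool_fig = F.avg_pool2d(fmap_fig, kernel_size=2)  # mean of each 2x2 window
```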
Now we will apply max pool and average pool to the output of the convolutional layer `conv2`.
```
# Taking the output we've been working with so far, first print its current size
print("Shape of conv2 feature maps before pooling: {0}".format(conv2.shape))
# Apply Max pool with size = 2 and then print new shape.
max_pool2 =
print("Shape of conv2 feature maps after max pooling: {0}".format(max_pool2.shape))
# Average pool with size = 2 and then print new shape
avg_pool2 =
print("Shape of conv2 feature maps after avg pooling: {0}".format(avg_pool2.shape))
```
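A possible completion using the functional pooling ops:
```
max_pool2 = F.max_pool2d(conv2, kernel_size=2)
avg_pool2 = F.avg_pool2d(conv2, kernel_size=2)
```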
#### Striding
One might expect that pixels in an image have high correlation with neighboring pixels, so we can save computation by skipping positions while sliding the convolutional kernel.
By default, a CNN slides across the input one pixel at a time, which we call a stride of 1.
By instead striding by 2, we skip calculating 75% of the values of the output feature map, which yields a feature map that's half the size in each spatial direction.
Note, while pooling is an operation done after the convolution, striding is part of the convolution operation itself.
```
# Since striding is part of the convolution operation, we'll start with the feature maps before the 2nd convolution
print("Shape of conv1 feature maps: {0}".format(conv1.shape))
# Apply 2nd convolutional layer, with striding of 2
conv2_strided =
# Print output shape
print("Shape of conv2 feature maps with stride of 2: {0}".format(conv2_strided.shape))
```
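A possible completion, reusing the `W2` and `b2` sketched for the second convolution above; with a 3x3 kernel, padding of 1, and stride of 2, the 32x32 feature maps come out 16x16:
```
conv2_strided = F.relu(F.conv2d(conv1, W2, bias=b2, stride=2, padding=1))
```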
## Building a custom CNN
Let's revisit image classification, this time on CIFAR-10, using the following CNN as our classifier: $5 \times 5$ convolution -> $2 \times 2$ max pool -> $5 \times 5$ convolution -> $2 \times 2$ max pool -> fully connected to $\mathbb R^{256}$ -> fully connected to $\mathbb R^{10}$ (prediction).
ReLU activation functions will be used to impose non-linearities.
Remember, convolutions produce 4-D outputs, and fully connected layers expect 2-D inputs, so tensors must be reshaped when transitioning from one to the other.
We can build this CNN with the components introduced before, but as with the logistic regression example, it may prove helpful to instead organize our model with a `nn.Module`.
```
import torch.nn as nn
# Important: Inherit the `nn.Module` class to define a PyTorch model
class CIFAR_CNN():
def __init__(self):
super().__init__()
# Step 1: Define the first convoluation layer (C_in=3, C_out=32, H_k=W_k=5, padding = 2)
self.conv1 =
# Step 2: Define the second convolutional layer (C_out=64, H_k=W_k=5, padding = 2)
self.conv2 =
# Step 3: Define the first fully-connected layer with an output dimension of 256.
# What should be the input dimension of this layer?
self.fc1 =
# Step 4: Define the second fully-connected layer with an output dimension of 10 (# of classes).
self.fc2 =
def forward(self, x):
# Step 5: Using the layers defined in __init__ function, define the forward pass of the neural network below:
# Apply conv layer 1, activation, and max-pool
# Apply conv layer 2, activation, and max-pool
# Reshape to kernel for fully-connected layer
# Apply fc layer 1 and activation
# Apply fc layer 2
output =
return output
```
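One way the skeleton above could be filled in (a sketch, not the only valid set of choices within the stated constraints). Note that the class needs to inherit from `nn.Module` so that its parameters are registered:
```
import torch
import torch.nn as nn
import torch.nn.functional as F

class CIFAR_CNN(nn.Module):
    def __init__(self):
        super().__init__()
        # 3 input channels (RGB), 32 output channels, 5x5 kernels, "same" padding
        self.conv1 = nn.Conv2d(3, 32, kernel_size=5, padding=2)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=5, padding=2)
        # Two 2x2 max pools shrink a 32x32 input to 8x8, with 64 channels
        self.fc1 = nn.Linear(64 * 8 * 8, 256)
        self.fc2 = nn.Linear(256, 10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)   # -> [N, 32, 16, 16]
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)   # -> [N, 64, 8, 8]
        x = x.view(-1, 64 * 8 * 8)                   # flatten for the fc layers
        x = F.relu(self.fc1(x))
        output = self.fc2(x)
        return output
```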
Notice how our `nn.Module` contains several operations chained together.
The code for submodule initialization, which creates all the stateful parameters associated with each operation, is placed in the `__init__()` function, where it is run once during object instantiation.
Meanwhile, the code describing the forward pass, which is used every time the model is run, is placed in the `forward()` method.
Printing an instantiated model shows the model summary:
```
model = CIFAR_CNN()
print(model)
```
We can drop this model into our logistic regression training code, with few modifications beyond changing the model itself.
A few other changes:
- CNNs expect a 4-D input, so we no longer have to reshape the images before feeding them to our neural network.
- Since CNNs are a little more complex than models we've worked with before, we're going to increase the number of epochs (complete passes through the training data) during training.
- We switch from a vanilla stochastic gradient descent optimizer to the [Adam](https://arxiv.org/abs/1412.6980) optimizer, which tends to do well for neural networks.
## Training the CNN
```
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from tqdm.notebook import tqdm, trange
cifar_train = datasets.CIFAR10(root="./datasets/cifar-10/", train=True, transform=transforms.ToTensor(), download=True)
cifar_test = datasets.CIFAR10(root="./datasets/cifar-10/", train=False, transform=transforms.ToTensor(), download=True)
# Create the train and test data loaders.
train_loader =
test_loader =
# Create a loader identical to the training loader with a sample size of 8. This is to demonstrate
# how we display images. If we had used the train_loader, we would be looking at 100 images!
sample_loader =
#define an image viewing function
def imshow(img):
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
#list out the classes for the dataset in order from 0 to 9 to correspond to the integer labels
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
#Take a sample of 1 batch from the sample loader
dataiter = iter(sample_loader)
images, labels = next(dataiter)
# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print(' '.join('%5s' % classes[labels[j]] for j in range(8)))
# Instantiate model
model =
# Loss and Optimizer
criterion =
optimizer =
track_loss = []
# Iterate through train set minibatchs
num_training_steps = 0
for epoch in trange(3):
for images, labels in tqdm(train_loader):
# Step 1: Zero out the gradients.
# Step 2: Forward pass.
# Step 3: Compute the loss using `criterion`.
# Step 5: Backward pass.
# Step 6: Update the parameters.
# Step 7: Track the loss value at every 100th step.
if num_training_steps % 100 == 0:
# Append loss to the list.
track_loss.append()
num_training_steps += 1
```
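A hedged sketch of how the blanks above could be filled in; the batch size of 100 follows the comment about the sample loader, while the learning rate is an assumption. (The `sample_loader` can be built the same way with a batch size of 8.)
```
train_loader = torch.utils.data.DataLoader(cifar_train, batch_size=100, shuffle=True)
test_loader = torch.utils.data.DataLoader(cifar_test, batch_size=100, shuffle=False)

model = CIFAR_CNN()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

track_loss = []
num_training_steps = 0
for epoch in trange(3):
    for images, labels in tqdm(train_loader):
        optimizer.zero_grad()                   # Step 1: zero out the gradients
        y = model(images)                       # Step 2: forward pass
        loss = criterion(y, labels)             # Step 3: compute the loss
        loss.backward()                         # Step 5: backward pass
        optimizer.step()                        # Step 6: update the parameters
        if num_training_steps % 100 == 0:
            track_loss.append(loss.item())      # Step 7: track the loss value
        num_training_steps += 1
```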
### Let's plot the loss function
```
##################################################
# #
# ---- YOUR CODE HERE ---- #
# #
##################################################
```
## Testing the trained model
```
## Testing
correct = 0
total = len(cifar_test)
with torch.no_grad():
# Iterate through test set minibatchs
for images, labels in tqdm(test_loader):
# Step 1: Forward pass to get
y =
# Step 2: Compute the predicted labels from `y`.
predictions =
# Step 3: Compute the number of samples that were correctly predicted, and maintain the count in the variable `correct`.
correct +=
print('Test accuracy: {}'.format(correct/total))
```
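One way to complete the three blanks inside the testing loop above:
```
y = model(images)                                # forward pass
predictions = torch.argmax(y, dim=1)             # most likely class per image
correct += (predictions == labels).sum().item()  # count correct predictions
```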
If you are running this notebook on CPU, training this CNN might take a while.
On the other hand, if you use a GPU, this model should train in seconds.
This is why we usually prefer to use GPUs when we have them.
### Torchvision
#### Datasets and transforms
As any experienced ML practitioner will say, data wrangling is often half (sometimes even 90%) of the battle when building a model.
Often, we have to write significant code to handle downloading, organizing, formatting, shuffling, pre-processing, augmenting, and batching examples.
For popular datasets, we'd like to standardize data handling so that the comparisons we make are specific to the models themselves.
Enter [Torchvision](https://pytorch.org/vision/stable/index.html).
Torchvision includes easy-to-use APIs for downloading and loading many popular vision datasets.
We've previously seen this in action for downloading the MNIST dataset:
```
from torchvision import datasets
mnist_train = datasets.MNIST(root="./datasets", train=True, transform=transforms.ToTensor(), download=True)
```
Of course, there's [many more](https://pytorch.org/vision/stable/datasets.html).
Currently, datasets for image classification (e.g. MNIST, CIFAR, ImageNet), object detection (VOC, COCO, Cityscapes), and video action recognition (UCF101, Kinetics) are included.
For formatting, pre-processing, and augmenting, [transforms](https://pytorch.org/vision/stable/transforms.html) can come in handy.
Again, we've seen this before (see above), when we used a transform to convert the MNIST data from PIL images to PyTorch tensors.
However, transforms can be used for much more.
Preprocessing steps like data whitening are common before feeding the data into the model.
Also, in many cases, we use data augmentations to artificially inflate our dataset and learn invariances.
Transforms are a versatile tool for all of these.
#### Leveraging popular convolutional neural networks
While you certainly can build your own custom CNNs like we did above, more often than not, it's better to use one of the popular existing architectures.
The Torchvision documentation has a [list of supported CNNs](https://pytorch.org/vision/stable/models.html), as well as some performance characteristics.
There's a number of reasons for using one of these CNNs instead of designing your own.
First, for image datasets larger and more complex than CIFAR and MNIST (which is basically all of them), a fair amount of network depth and width is often necessary.
For example, some of the popular CNNs can be over 100 layers deep, with several tricks and details beyond what we've covered in this notebook.
Coding all of this yourself has a high potential for error, especially when you're first getting started.
Instead, you can create the CNN architecture using Torchvision, using a couple lines:
```
import torchvision.models as models
resnet18 = models.resnet18()
print(resnet18)
```
Loading a working CNN architecture in a couple lines can save a significant amount of time both implementing and debugging.
The second, perhaps even more important, reason to use one of these existing architectures is the ability to use pre-trained weights.
Early on in the recent resurgence of deep learning, people discovered that the weights of a CNN trained for ImageNet classification were highly transferable.
For example, it is common to use the weights of an ImageNet-trained CNN as a weight initialization for other vision tasks, or even to freeze the bulk of the weights and only re-train the final classification layer(s) on a new task.
This is significant, as in most settings, we rarely have enough labeled data to train a powerful CNN from scratch without overfitting.
Loading pre-trained CNN is also pretty simple, involving an additional argument to the previous cell block:
`resnet18 = models.resnet18(pretrained=True)`
<font size="1">*We will not be using the above command, as running it will initiate a download of the pre-trained weights, which is a fairly large file.*</font>
A full tutorial on using pre-trained CNNs is a little beyond the scope of this notebook.
See [this tutorial](https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html) for an example.
#### Other computer vision tasks
The base CNN architectures were often designed for image classification, but the same CNNs are often used as the backbone of most modern computer vision models.
These other models often take this base CNN and include additional networks or make other architecture changes to adapt them to other tasks, such as object detection.
Torchvision contains a few models (and pre-trained weights) for object detection, segmentation, and video action recognition.
For example, to load a [Faster R-CNN](https://arxiv.org/abs/1506.01497) with a [ResNet50](https://arxiv.org/abs/1512.03385) convolutional feature extractor with [Feature Pyramid Networks](https://arxiv.org/abs/1612.03144) pre-trained on [MS COCO](http://cocodataset.org/#home):
`object_detector = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)`
<font size="1">*Again, this line has been commented out to prevent loading a large network for this demo.*</font>
Torchvision's selection of non-classification models is relatively light, and not particularly flexible.
A number of other libraries are available, depending on the task.
For example, for object detection and segmentation, Facebook AI Research's [Detectron2](https://github.com/facebookresearch/detectron2) is highly recommended.
| github_jupyter |
# Tutorial - Time Series Forecasting - Autoregression (AR)
The goal is to forecast time series with the Autoregression (AR) approach: 1) JetRail commuter traffic, 2) Air Passengers, 3) a reusable autoregression function applied to Air Passengers, and 4) the same function applied to Wine Sales.
Reference: Jason Brownlee - https://machinelearningmastery.com/time-series-forecasting-methods-in-python-cheat-sheet/
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
import warnings
warnings.filterwarnings("ignore")
# Load File
url = 'https://raw.githubusercontent.com/tristanga/Machine-Learning/master/Data/JetRail%20Avg%20Hourly%20Traffic%20Data%20-%202012-2013.csv'
df = pd.read_csv(url)
df.info()
df.Datetime = pd.to_datetime(df.Datetime,format='%Y-%m-%d %H:%M')
df.index = df.Datetime
```
# Autoregression (AR) Approach with JetRail
The autoregression (AR) method models the next step in the sequence as a linear function of the observations at prior time steps.
The notation for the model involves specifying the order of the model p as a parameter to the AR function, e.g. AR(p). For example, AR(1) is a first-order autoregression model.
The method is suitable for univariate time series without trend and seasonal components.
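Written out, an AR(p) model expresses the next observation as a linear combination of the previous $p$ observations plus a noise term:
$$X_t = c + \sum_{i=1}^{p} \varphi_i X_{t-i} + \varepsilon_t$$
where $c$ is a constant, the $\varphi_i$ are coefficients estimated from the training data, and $\varepsilon_t$ is white noise. AR(1), for example, reduces to $X_t = c + \varphi_1 X_{t-1} + \varepsilon_t$.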
```
#Split Train Test
import math
total_size=len(df)
split = 10392 / 11856
train_size=math.floor(split*total_size)
train=df.head(train_size)
test=df.tail(len(df) -train_size)
from statsmodels.tsa.ar_model import AR
model = AR(train.Count)
fit1 = model.fit()
y_hat = test.copy()
y_hat['AR'] = fit1.predict(start=len(train), end=len(train)+len(test)-1, dynamic=False)
#Plotting data
plt.figure(figsize=(12,8))
plt.plot(train.index, train['Count'], label='Train')
plt.plot(test.index,test['Count'], label='Test')
plt.plot(y_hat.index,y_hat['AR'], label='AR')
plt.legend(loc='best')
plt.title("Autoregression (AR) Forecast")
plt.show()
```
# RMSE Calculation
```
from sklearn.metrics import mean_squared_error
from math import sqrt
rms = sqrt(mean_squared_error(test.Count, y_hat.AR))
print('RMSE = '+str(rms))
```
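For reference, the quantity computed above is the root mean squared error between the held-out observations $y_t$ and the forecasts $\hat{y}_t$:
$$\mathrm{RMSE} = \sqrt{\frac{1}{n}\sum_{t=1}^{n}\left(y_t - \hat{y}_t\right)^2}$$
Because it is expressed in the same units as the original series, it can be compared directly with typical hourly traffic counts.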
# Autoregression (AR) Approach with Air Passengers
```
# Subsetting
url = 'https://raw.githubusercontent.com/tristanga/Machine-Learning/master/Data/International%20Airline%20Passengers.csv'
df = pd.read_csv(url, sep =";")
df.info()
df.Month = pd.to_datetime(df.Month,format='%Y-%m')
df.index = df.Month
#df.head()
#Creating train and test set
import math
total_size=len(df)
train_size=math.floor(0.7*total_size) #(70% Dataset)
train=df.head(train_size)
test=df.tail(len(df) -train_size)
#train.info()
#test.info()
from statsmodels.tsa.ar_model import AR
# Create prediction table
y_hat = test.copy()
model = AR(train['Passengers'])
fit1 = model.fit()
y_hat['AR'] = fit1.predict(start=len(train), end=len(train)+len(test)-1, dynamic=False)
y_hat.describe()
plt.figure(figsize=(12,8))
plt.plot(train.index, train['Passengers'], label='Train')
plt.plot(test.index,test['Passengers'], label='Test')
plt.plot(y_hat.index,y_hat['AR'], label='AR')
plt.legend(loc='best')
plt.title("Autoregression (AR)")
plt.show()
from sklearn.metrics import mean_squared_error
from math import sqrt
rms = sqrt(mean_squared_error(test.Passengers, y_hat.AR))
print('RMSE = '+str(rms))
```
# Function Autoregression (AR) Approach with variables
```
def AR_forecasting(mydf,colval,split):
#print(split)
import math
from statsmodels.tsa.api import Holt
from sklearn.metrics import mean_squared_error
from math import sqrt
global y_hat, train, test
total_size=len(mydf)
train_size=math.floor(split*total_size) #(70% Dataset)
train=mydf.head(train_size)
test=mydf.tail(len(mydf) -train_size)
y_hat = test.copy()
model = AR(train[colval])
fit1 = model.fit()
y_hat['AR'] = fit1.predict(start=len(train), end=len(train)+len(test)-1, dynamic=False)
plt.figure(figsize=(12,8))
plt.plot(train.index, train[colval], label='Train')
plt.plot(test.index,test[colval], label='Test')
plt.plot(y_hat.index,y_hat['AR'], label='AR')
plt.legend(loc='best')
plt.title("Autoregression (AR) Forecast")
plt.show()
rms = sqrt(mean_squared_error(test[colval], y_hat.AR))
print('RMSE = '+str(rms))
AR_forecasting(df,'Passengers',0.7)
```
# Testing Function Autoregression (AR) Approach with Wine Dataset
```
url = 'https://raw.githubusercontent.com/tristanga/Data-Cleaning/master/Converting%20Time%20Series/Wine_Sales_R_Dataset.csv'
df = pd.read_csv(url)
df.info()
df.Date = pd.to_datetime(df.Date,format='%Y-%m-%d')
df.index = df.Date
AR_forecasting(df,'Sales',0.7)
```
| github_jupyter |
# Tune TensorFlow Serving
## Guidelines
### CPU-only
If your system is CPU-only (no GPU), then consider the following values:
* `num_batch_threads` equal to the number of CPU cores
* `max_batch_size` to infinity (i.e. MAX_INT)
* `batch_timeout_micros` to 0.
Then experiment with batch_timeout_micros values in the 1-10 millisecond (1000-10000 microsecond) range, while keeping in mind that 0 may be the optimal value.
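For example, on a hypothetical 8-core CPU-only machine these guidelines might translate into a batching config like the following (the 2 ms timeout is just a starting point for the experiment suggested above):
```
num_batch_threads { value: 8 }
max_batch_size { value: 99999999 }
batch_timeout_micros { value: 2000 }
```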
### GPU
If your model uses a GPU device for part or all of its inference work, consider the following values:
* `num_batch_threads` to the number of CPU cores.
* `batch_timeout_micros` to infinity while tuning `max_batch_size` to achieve the desired balance between throughput and average latency. Consider values in the hundreds or thousands.
For online serving, tune `batch_timeout_micros` to rein in tail latency.
The idea is that batches normally get filled to max_batch_size, but occasionally when there is a lapse in incoming requests, to avoid introducing a latency spike it makes sense to process whatever's in the queue even if it represents an underfull batch.
The best value for `batch_timeout_micros` is typically a few milliseconds, and depends on your context and goals.
Zero is a value to consider, as it works well for some workloads. For bulk-processing batch jobs, choose a large value, perhaps a few seconds, to ensure good throughput without waiting too long for the final (and likely underfull) batch.
## Close TensorFlow Serving and Load Test Terminals
## Open a Terminal through Jupyter Notebook
### (Menu Bar -> File -> New...)

## Enable Request Batching
## Start TensorFlow Serving in Separate Terminal
The params are as follows:
* `port` for TensorFlow Serving (int)
* `model_name` (anything)
* `model_base_path` (/path/to/model/ above all versioned sub-directories)
* `enable_batching` (true|false)
```
tensorflow_model_server \
--port=9000 \
--model_name=linear \
--model_base_path=/root/models/linear_fully_optimized/cpu \
--batching_parameters_file=/root/config/tf_serving/batch_config.txt \
--enable_batching=true
```
### `batch_config.txt`
* `num_batch_threads` (usually equal to the number of CPU cores or a multiple thereof)
* `max_batch_size` (# of requests - start with infinity, tune down to find the right balance between latency and throughput)
* `batch_timeout_micros` (minimum batch window duration)
```
num_batch_threads { value: 100 }
max_batch_size { value: 99999999 }
batch_timeout_micros { value: 100000 }
```
## Start Load Test in the Terminal
```
loadtest high
```
Notice the throughput and avg/min/max latencies:
```
summary ... = 301.1/s Avg: 227 Min: 3 Max: 456 Err: 0 (0.00%)
```
## Modify Request Batching Parameters, Repeat Load Test
Gain intuition on the performance impact of changing the request batching parameters.
| github_jupyter |
## The Basics
At the core of Python (and any programming language) there are some key characteristics of how a program is structured that enable the proper execution of that program. These characteristics include the structure of the code itself, the core data types from which others are built, and core operators that modify objects or create new ones. From these raw materials more complex commands, functions, and modules are built.
For guidance on recommended Python structure refer to the [Python Style Guide](https://www.python.org/dev/peps/pep-0008).
# Examples: Variables and Data Types
## The Interpreter
```
# The interpreter can be used as a calculator, and can also echo or concatenate strings.
3 + 3
3 * 3
3 ** 3
3 / 2 # classic division - output is a floating point number
# Use quotes around strings, single or double, but be consistent to the extent possible
'dogs'
"dogs"
"They're going to the beach"
'He said "I like mac and cheese"'
# sometimes you can't escape the escape
'He said "I\'d like mac and cheese"'
# + operator can be used to concatenate strings
'dogs' + "cats"
print('Hello World!')
```
### Try It Yourself
Go to the section _4.4. Numeric Types_ in the Python 3 documentation at <https://docs.python.org/3.4/library/stdtypes.html>. The table in that section describes different operators - try some!
What is the difference between the different division operators (`/`, `//`, and `%`)?
## Variables
Variables allow us to store values for later use.
```
a = 5
b = 10
a + b
```
Variables can be reassigned:
```
b = 38764289.1097
a + b
```
The ability to reassign variable values becomes important when iterating through groups of objects for batch processing or other purposes. In the example below, the value of `b` is dynamically updated every time the `while` loop is executed:
```
a = 5
b = 10
while b > a:
print("b="+str(b))
b = b-1
```
Variable data types can be inferred, so Python does not require us to declare the data type of a variable on assignment.
```
a = 5
type(a)
```
is equivalent to
```
a = int(5)
type(a)
c = 'dogs'
print(type(c))
c = str('dogs')
print(type(c))
```
There are cases when we may want to declare the data type, for example to assign a different data type from the default that will be inferred. Concatenating strings provides a good example.
```
customer = 'Carol'
pizzas = 2
print(customer + ' ordered ' + pizzas + ' pizzas.')
```
Above, Python has inferred the type of the variable `pizzas` to be an integer. Since strings can only be concatenated with other strings, our print statement generates an error. There are two ways we can resolve the error:
1. Declare the `pizzas` variable as type string (`str`) on assignment or
2. Re-cast the `pizzas` variable as a string within the `print` statement.
```
customer = 'Carol'
pizzas = str(2)
print(customer + ' ordered ' + pizzas + ' pizzas.')
customer = 'Carol'
pizzas = 2
print(customer + ' ordered ' + str(pizzas) + ' pizzas.')
```
Given the following variable assignments:
```
x = 12
y = str(14)
z = 'donuts'
```
Predict the output of the following:
1. `y + z`
2. `x + y`
3. `x + int(y)`
4. `str(x) + y`
Check your answers in the interpreter.
### Variable Naming Rules
Variable names are case senstive and:
1. Can only consist of one "word" (no spaces).
2. Must begin with a letter or underscore character ('\_').
3. Can only use letters, numbers, and the underscore character.
We further recommend using variable names that are meaningful within the context of the script and the research.
## Reading Files
We can accomplish a lot by assigning variables within our code as demonstrated above, but often we are interested in working with objects and data that exist in other files and directories on our system.
When we want to read data files into a script, we do so by assigning the content of the file to a variable. This stores the data in memory and lets us perform processes and analyses on the data without changing the content of the source file.
There are several ways to read files in Python - many libraries have methods for reading text, Excel and Word documents, PDFs, etc. This morning we're going to demonstrate using the ```read()``` and ```readlines()``` methods in the standard library, and the Pandas ```read_csv()``` function.
```
# Read unstructured text
# One way is to open the whole file as a block
file_path = "./beowulf" # We can save the path to the file as a variable
file_in = open(file_path, "r") # Options are 'r', 'w', and 'a' (read, write, append)
beowulf_a = file_in.read()
file_in.close()
print(beowulf_a)
# Another way is to read the file as a list of individual lines
with open(file_path, "r") as b:
beowulf_b = b.readlines()
print(beowulf_b)
# In order to get a similar printout to the first method, we use a for loop
# to print line by line - more on for loops below!
for l in beowulf_b:
print(l)
# We now have two variables with the content of our 'beowulf' file represented using two different data structures.
# Why do you think we get the different outputs from the next two statements?
# Beowulf text stored as one large string
print("As string:", beowulf_a[0])
# Beowulf text stored as a list of lines
print("As list of lines:", beowulf_b[0])
# We can confirm our expectations by checking on the types of our two beowulf variables
print(type(beowulf_a))
print(type(beowulf_b))
# Read CSV files using the Pandas read_csv method.
# Note: Pandas also includes methods for reading Excel.
# First we need to import the pandas library
import pandas as pd
# Create a variable to hold the path to the file
fpath = "aaj1945_DataS1_Egg_shape_by_species_v2.csv"
egg_data = pd.read_csv(fpath)
# We can get all kinds of info about the dataset
# info() provides an overview of the structure
print(egg_data.info())
# Look at the first five rows
egg_data.head()
# Names of columns
print(egg_data.columns.values)
# Dimensions (number of rows and columns)
print(egg_data.shape)
# And much more! But as a final example we can perform operations on the data.
# Descriptive statistics on the "Number of eggs" column
print(egg_data["Number of eggs"].describe())
# Or all of the columns in whole table with numeric data types:
print(egg_data.describe())
```
### Structure
Now that we have practiced assigning variables and reading information from files, we will have a look at concepts that are key to developing processes to use and analyze this information.
#### Blocks
The structure of a Python program is pretty simple:
Blocks of code are defined using indentation. Code that is at a lower level of indentation is not considered part of a block. Indentation can be defined using spaces or tabs (spaces are recommended by the style guide), but be consistent (and prepared to defend your choice). As we will see, code blocks define the boundaries of sets of commands that fit within a given section of code. This indentation model for defining blocks of code significantly increases the readability of Python code.
For example:
>>>a = 5
>>>b = 10
>>>while b > a:
... print("b="+str(b))
... b = b-1
>>>print("I'm outside the block")
#### Comments & Documentation
You can (and should) also include documentation and comments in the code you write - both for yourself, and potential future users (including yourself). Comments are pretty much any content on a line that follows a `#` symbol (unless it is between quotation marks). For example:
>>># we're going to do some math now
>>>yae = 5 # the number of votes in favor
>>>nay = 10 # the number of votes against
>>>proportion = yae / nay # the proportion of votes in favor
>>>print(proportion)
When you are creating functions or classes (a bit more on what these are in a bit) you can also create what are called *doc strings* that provide a defined location for content that is used to generate the `help()` information highlighted above and is also used by other systems for the automatic generation of documentation for packages that contain these *doc strings*. Creating a *doc string* is simple - just create a single or multi-line text string (more on this soon) that starts on the first indented line following the start of the definition of the function or class. For example:
>>># we're going to create a documented function and then access the information about the function
>>>def doc_demo(some_text="Ill skewer yer gizzard, ye salty sea bass"):
... """This function takes the provided text and prints it out in Pirate
...
... If a string is not provided for `some_text` a default message will be displayed
... """
... out_string = "Ahoy Matey. " + some_text
... print(out_string)
>>>help(doc_demo)
>>>doc_demo()
>>>doc_demo("Sail ho!")
### Standard Objects
Any programming language has at its foundation a collection of *types* or in Python's terminology *objects*. The standard objects of Python consist of the following:
* **Numbers** - integer, floating point, complex, and multiple-base defined numeric values
* **Strings** - **immutable** strings of characters, numbers, and symbols that are bounded by single- or double-quotes
* **Lists** - an ordered collection of objects that is bounded by square-brackets - `[]`. Elements in lists are extracted or referenced by their position in the list. For example, `my_list[0]` refers to the first item in the list, `my_list[5]` the sixth, and `my_list[-1]` to the last item in the list.
* **Dictionaries** - an unordered collection of objects that are referenced by *keys*, which allow us to refer to those objects by their keys. Dictionaries are bounded by curly brackets - `{}` - with each element of the dictionary consisting of a *key* (string) and a *value* (object) separated by a colon `:`. Elements of a dictionary are extracted or referenced using their keys. For example:
my_dict = {"key1":"value1", "key2":36, "key3":[1,2,3]}
my_dict['key1'] returns "value1"
my_dict['key3'] returns [1,2,3]
* **Tuples** - **immutable** lists that are bounded by parentheses = `()`. Referencing elements in a tuple is the same as referencing elements in a list above.
* **Files** - objects that represent external files on the file system. Programs can interact with (e.g. read, write, append) external files through their representative file objects in the program.
* **Sets** - unordered collections of **immutable** objects (i.e. ints, floats, strings, and tuples) where membership in the set and uniqueness within the set are defining characteristics of the member objects. Sets are created using the `set` function on a sequence of objects. A specialized set of operators allows for identifying the *union*, *intersection*, and *difference* (among others) between sets - a short illustration follows the code block below.
* **Other core types** - Booleans, types, `None`
* **Program unit types** - *functions*, *modules*, and *classes* for example
* **Implementation-related types** (not covered in this workshop)
These objects have their own sets of related methods (as we saw in the `help()` examples above) that enable their creation, and operations upon them.
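As a quick, self-contained illustration of the set operations mentioned above (a minimal sketch; the values are arbitrary):
```
# build two sets from sequences
evens = set([0, 2, 4, 6, 8])
primes = set([2, 3, 5, 7])

evens | primes    # union: {0, 2, 3, 4, 5, 6, 7, 8}
evens & primes    # intersection: {2}
evens - primes    # difference: {0, 4, 6, 8}
2 in primes       # membership test: True
```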
```
# Fun with types
this = 12
that = 15
the_other = "27"
my_stuff = [this,that,the_other,["a","b","c",4]]
more_stuff = {
"item1": this,
"item2": that,
"item3": the_other,
"item4": my_stuff
}
this + that
# this won't work ...
# this + that + the_other
# ... but this will ...
this + that + int(the_other)
# ...and this too
str(this) + str(that) + the_other
```
## Lists
<https://docs.python.org/3/library/stdtypes.html?highlight=lists#list>
Lists are a type of collection in Python. Lists allow us to store sequences of items that are typically but not always similar. All of the following lists are legal in Python:
```
# Separate list items with commas!
number_list = [1, 2, 3, 4, 5]
string_list = ['apples', 'oranges', 'pears', 'grapes', 'pineapples']
combined_list = [1, 2, 'oranges', 3.14, 'peaches', 'grapes', 99.19876]
# Nested lists - lists of lists - are allowed.
list_of_lists = [[1, 2, 3],
['oranges', 'grapes', 8],
[['small list'],
['bigger', 'list', 55],
['url_1', 'url_2']
]
]
```
There are multiple ways to create a list:
```
# Create an empty list
empty_list = []
# As we did above, by using square brackets around a comma-separated sequence of items
new_list = [1, 2, 3]
# Using the type constructor
constructed_list = list('purple')
# Using a list comprehension
result_list = [i for i in range(1, 20)]
```
We can inspect our lists:
```
empty_list
new_list
result_list
constructed_list
```
The above output for `constructed_list` may seem odd. Referring to the documentation, we see that the argument to the type constructor is an _iterable_, which according to the documentation is "An object capable of returning its members one at a time." In our constructor statement above
```
# Using the type constructor
constructed_list = list('purple')
```
the word 'purple' is the object - in this case a ```str``` (string) consisting of the word 'purple' - that when used to construct a list returns its members (individual letters) one at a time.
Compare the outputs below:
```
constructed_list_int = list(123)  # note: this raises a TypeError, since an int is not iterable
constructed_list_str = list('123')
constructed_list_str
```
Lists in Python are:
* mutable - the list and list items can be changed
* ordered - list items keep the same "place" in the list
_Ordered_ here does not mean sorted. The list below is printed with the numbers in the order we added them to the list, not in numeric order:
```
ordered = [3, 2, 7, 1, 19, 0]
ordered
# There is a 'sort' method for sorting list items as needed:
ordered.sort()
ordered
```
Info on additional list methods is available at <https://docs.python.org/3/library/stdtypes.html?highlight=lists#mutable-sequence-types>
Because lists are ordered, it is possible to access list items by referencing their positions. Note that the position of the first item in a list is 0 (zero), not 1!
```
string_list = ['apples', 'oranges', 'pears', 'grapes', 'pineapples']
string_list[0]
# We can use positions to 'slice' or select sections of a list:
string_list[3:] # start at index '3' and continue to the end
string_list[:3] # start at index '0' and go up to, but don't include index '3'
string_list[1:4] # start at index '1' and go up to, but don't include, index '4'
# If we don't know the position of a list item, we can use the 'index()' method to find out.
# Note that in the case of duplicate list items, this only returns the position of the first one:
string_list.index('pears')
string_list.append('oranges')
string_list
string_list.index('oranges')
# one more time with lists and dictionaries
list_ex1 = my_stuff[0] + my_stuff[1] + int(my_stuff[2])
print(list_ex1)
# we can use parentheses to split a continuous group of commands over multiple lines
list_ex2 = (
str(my_stuff[0])
+ str(my_stuff[1])
+ my_stuff[2]
+ my_stuff[3][0]
)
print(list_ex2)
dict_ex1 = (
more_stuff['item1']
+ more_stuff['item2']
+ int(more_stuff['item3'])
)
print(dict_ex1)
dict_ex2 = (
str(more_stuff['item1'])
+ str(more_stuff['item2'])
+ more_stuff['item3']
)
print(dict_ex2)
# Now try it yourself ...
# print out the phrase "The answer: 42" using the following
# variables and one or more of your own and the 'print()' function
# (remember spaces are characters as well)
start = "The"
answer = 42
```
### Operators
If *objects* are the nouns, operators are the verbs of a programming language. We've already seen examples of some operators: *assignment* with the `=` operator, *arithmetic* addition *and* string concatenation with the `+` operator, *arithmetic* division and subtraction with the `/` and `-` operators, and *comparison* with the `>` operator. Different object types have different operators that may be used with them. The [Python Documentation](https://docs.python.org/3/library/stdtypes.html) provides detailed information about the operators and their functions as they relate to the standard object types described above.
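A few of these operators in action (a minimal sketch; the values are arbitrary):
```
total = 3 + 4            # arithmetic addition -> 7
greeting = "Ahoy" + "!"  # the same + operator concatenates strings -> 'Ahoy!'
quotient = 10 / 4        # division always returns a float -> 2.5
difference = 10 - 4      # subtraction -> 6
is_bigger = 10 > 4       # comparison returns a Boolean -> True
```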
### Flow Control and Logical Tests
Flow control commands allow for the dynamic execution of parts of the program based upon logical conditions, or processing of objects within an *iterable* object (like a list or dictionary). Some key flow control commands in python include:
* `while-else` loops that continue to run until the termination test is `False` or a `break` command is issued within the loop:
done = False
i = 0
while not done:
i = i+1
if i > 5: done = True
* `if-elif-else` statements define alternative blocks of code that are executed if a test condition is met:
do_something = "what?"
if do_something == "what?":
print(do_something)
elif do_something == "where?":
print("Where are we going?")
else:
print("I guess nothing is going to happen")
* `for` loops allow for repeated execution of a block of code for each item in a python sequence such as a list or dictionary. For example:
my_stuff = ['a', 'b', 'c']
for item in my_stuff:
print(item)
a
b
c
# Bayesian Optimization
[Bayesian optimization](https://en.wikipedia.org/wiki/Bayesian_optimization) is a powerful strategy for minimizing (or maximizing) objective functions that are costly to evaluate. It is an important component of [automated machine learning](https://en.wikipedia.org/wiki/Automated_machine_learning) toolboxes such as [auto-sklearn](https://automl.github.io/auto-sklearn/stable/), [auto-weka](http://www.cs.ubc.ca/labs/beta/Projects/autoweka/), and [scikit-optimize](https://scikit-optimize.github.io/), where Bayesian optimization is used to select model hyperparameters. Bayesian optimization is used for a wide range of other applications as well; as cataloged in the review [2], these include interactive user-interfaces, robotics, environmental monitoring, information extraction, combinatorial optimization, sensor networks, adaptive Monte Carlo, experimental design, and reinforcement learning.
## Problem Setup
We are given a minimization problem
$$ x^* = \text{arg}\min \ f(x), $$
where $f$ is a fixed objective function that we can evaluate pointwise.
Here we assume that we do _not_ have access to the gradient of $f$. We also
allow for the possibility that evaluations of $f$ are noisy.
To solve the minimization problem, we will construct a sequence of points $\{x_n\}$ that converge to $x^*$. Since we implicitly assume that we have a fixed budget (say 100 evaluations), we do not expect to find the exact minimum $x^*$: the goal is to get the best approximate solution we can given the allocated budget.
The Bayesian optimization strategy works as follows:
1. Place a prior on the objective function $f$. Each time we evaluate $f$ at a new point $x_n$, we update our model for $f(x)$. This model serves as a surrogate objective function and reflects our beliefs about $f$ (in particular it reflects our beliefs about where we expect $f(x)$ to be close to $f(x^*)$). Since we are being Bayesian, our beliefs are encoded in a posterior that allows us to systematically reason about the uncertainty of our model predictions.
2. Use the posterior to derive an "acquisition" function $\alpha(x)$ that is easy to evaluate and differentiate (so that optimizing $\alpha(x)$ is easy). In contrast to $f(x)$, we will generally evaluate $\alpha(x)$ at many points $x$, since doing so will be cheap.
3. Repeat until convergence:
+ Use the acquisition function to derive the next query point according to
$$ x_{n+1} = \text{arg}\min \ \alpha(x). $$
+ Evaluate $f(x_{n+1})$ and update the posterior.
A good acquisition function should make use of the uncertainty encoded in the posterior to encourage a balance between exploration—querying points where we know little about $f$—and exploitation—querying points in regions we have good reason to think $x^*$ may lie. As the iterative procedure progresses our model for $f$ evolves and so does the acquisition function. If our model is good and we've chosen a reasonable acquisition function, we expect that the acquisition function will guide the query points $x_n$ towards $x^*$.
In this tutorial, our model for $f$ will be a Gaussian process. In particular we will see how to use the [Gaussian Process module](http://docs.pyro.ai/en/0.3.1/contrib.gp.html) in Pyro to implement a simple Bayesian optimization procedure.
```
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import torch
import torch.autograd as autograd
import torch.optim as optim
from torch.distributions import constraints, transform_to
import pyro
import pyro.contrib.gp as gp
assert pyro.__version__.startswith('1.5.2')
pyro.set_rng_seed(1)
```
## Define an objective function
For the purposes of demonstration, the objective function we are going to consider is the [Forrester et al. (2008) function](https://www.sfu.ca/~ssurjano/forretal08.html):
$$f(x) = (6x-2)^2 \sin(12x-4), \quad x\in [0, 1].$$
This function has both a local minimum and a global minimum. The global minimum is at $x^* = 0.75725$.
```
def f(x):
return (6 * x - 2)**2 * torch.sin(12 * x - 4)
```
Let's begin by plotting $f$.
```
x = torch.linspace(0, 1)
plt.figure(figsize=(8, 4))
plt.plot(x.numpy(), f(x).numpy())
plt.show()
```
## Setting a Gaussian Process prior
[Gaussian processes](https://en.wikipedia.org/wiki/Gaussian_process) are a popular choice of prior over functions due to their power and flexibility. The core of a Gaussian Process is its covariance function $k$, which governs the similarity of $f(x)$ for pairs of input points. Here we will use a Gaussian Process as our prior for the objective function $f$. Given inputs $X$ and the corresponding noisy observations $y$, the model takes the form
$$f\sim\mathrm{MultivariateNormal}(0,k(X,X)),$$
$$y\sim f+\epsilon,$$
where $\epsilon$ is i.i.d. Gaussian noise and $k(X,X)$ is a covariance matrix whose entries are given by $k(x,x^\prime)$ for each pair of inputs $(x,x^\prime)$.
We choose the [Matern](https://en.wikipedia.org/wiki/Mat%C3%A9rn_covariance_function) kernel with $\nu = \frac{5}{2}$ (as suggested in reference [1]). Note that the popular [RBF](https://en.wikipedia.org/wiki/Radial_basis_function_kernel) kernel, which is used in many regression tasks, results in a function prior whose samples are infinitely differentiable; this is probably an unrealistic assumption for most 'black-box' objective functions.
```
# initialize the model with four input points: 0.0, 0.33, 0.66, 1.0
X = torch.tensor([0.0, 0.33, 0.66, 1.0])
y = f(X)
gpmodel = gp.models.GPRegression(X, y, gp.kernels.Matern52(input_dim=1),
noise=torch.tensor(0.1), jitter=1.0e-4)
```
The following helper function `update_posterior` will take care of updating our `gpmodel` each time we evaluate $f$ at a new value $x$.
```
def update_posterior(x_new):
y = f(x_new) # evaluate f at new point.
X = torch.cat([gpmodel.X, x_new]) # incorporate new evaluation
y = torch.cat([gpmodel.y, y])
gpmodel.set_data(X, y)
# optimize the GP hyperparameters using Adam with lr=0.001
optimizer = torch.optim.Adam(gpmodel.parameters(), lr=0.001)
gp.util.train(gpmodel, optimizer)
```
## Define an acquisition function
There are many reasonable options for the acquisition function (see references [1] and [2] for a list of popular choices and a discussion of their properties). Here we will use one that is 'simple to implement and interpret,' namely the 'Lower Confidence Bound' acquisition function.
It is given by
$$
\alpha(x) = \mu(x) - \kappa \sigma(x)
$$
where $\mu(x)$ and $\sigma(x)$ are the mean and the square root of the variance (i.e. the standard deviation) of the posterior at the point $x$, and the constant $\kappa>0$ controls the trade-off between exploitation and exploration. This acquisition function will be minimized for choices of $x$ where either: i) $\mu(x)$ is small (exploitation); or ii) $\sigma(x)$ is large (exploration). A large value of $\kappa$ means that we place more weight on exploration, because we prefer candidates $x$ in areas of high uncertainty. A small value of $\kappa$ encourages exploitation, because we prefer candidates $x$ that minimize $\mu(x)$, the mean of our surrogate objective function. We will use $\kappa=2$.
```
def lower_confidence_bound(x, kappa=2):
mu, variance = gpmodel(x, full_cov=False, noiseless=False)
sigma = variance.sqrt()
return mu - kappa * sigma
```
The final component we need is a way to find (approximate) minimizing points $x_{\rm min}$ of the acquisition function. There are several ways to proceed, including gradient-based and non-gradient-based techniques. Here we will follow the gradient-based approach. One of the possible drawbacks of gradient descent methods is that the minimization algorithm can get stuck at a local minimum. In this tutorial, we adopt a (very) simple approach to address this issue:
- First, we seed our minimization algorithm with 5 different values: i) one is chosen to be $x_{n-1}$, i.e. the candidate $x$ used in the previous step; and ii) four are chosen uniformly at random from the domain of the objective function.
- We then run the minimization algorithm to approximate convergence for each seed value.
- Finally, from the five candidate $x$s identified by the minimization algorithm, we select the one that minimizes the acquisition function.
Please refer to reference [2] for a more detailed discussion of this problem in Bayesian Optimization.
```
def find_a_candidate(x_init, lower_bound=0, upper_bound=1):
# transform x to an unconstrained domain
constraint = constraints.interval(lower_bound, upper_bound)
unconstrained_x_init = transform_to(constraint).inv(x_init)
unconstrained_x = unconstrained_x_init.clone().detach().requires_grad_(True)
minimizer = optim.LBFGS([unconstrained_x], line_search_fn='strong_wolfe')
def closure():
minimizer.zero_grad()
x = transform_to(constraint)(unconstrained_x)
y = lower_confidence_bound(x)
autograd.backward(unconstrained_x, autograd.grad(y, unconstrained_x))
return y
minimizer.step(closure)
# after finding a candidate in the unconstrained domain,
# convert it back to original domain.
x = transform_to(constraint)(unconstrained_x)
return x.detach()
```
## The inner loop of Bayesian Optimization
With the various helper functions defined above, we can now encapsulate the main logic of a single step of Bayesian Optimization in the function `next_x`:
```
def next_x(lower_bound=0, upper_bound=1, num_candidates=5):
candidates = []
values = []
x_init = gpmodel.X[-1:]
for i in range(num_candidates):
x = find_a_candidate(x_init, lower_bound, upper_bound)
y = lower_confidence_bound(x)
candidates.append(x)
values.append(y)
x_init = x.new_empty(1).uniform_(lower_bound, upper_bound)
argmin = torch.min(torch.cat(values), dim=0)[1].item()
return candidates[argmin]
```
## Running the algorithm
To illustrate how Bayesian Optimization works, we make a convenient plotting function that will help us visualize our algorithm's progress.
```
def plot(gs, xmin, xlabel=None, with_title=True):
xlabel = "xmin" if xlabel is None else "x{}".format(xlabel)
Xnew = torch.linspace(-0.1, 1.1)
ax1 = plt.subplot(gs[0])
ax1.plot(gpmodel.X.numpy(), gpmodel.y.numpy(), "kx") # plot all observed data
with torch.no_grad():
loc, var = gpmodel(Xnew, full_cov=False, noiseless=False)
sd = var.sqrt()
ax1.plot(Xnew.numpy(), loc.numpy(), "r", lw=2) # plot predictive mean
ax1.fill_between(Xnew.numpy(), loc.numpy() - 2*sd.numpy(), loc.numpy() + 2*sd.numpy(),
color="C0", alpha=0.3) # plot uncertainty intervals
ax1.set_xlim(-0.1, 1.1)
ax1.set_title("Find {}".format(xlabel))
if with_title:
ax1.set_ylabel("Gaussian Process Regression")
ax2 = plt.subplot(gs[1])
with torch.no_grad():
# plot the acquisition function
ax2.plot(Xnew.numpy(), lower_confidence_bound(Xnew).numpy())
# plot the new candidate point
ax2.plot(xmin.numpy(), lower_confidence_bound(xmin).numpy(), "^", markersize=10,
label="{} = {:.5f}".format(xlabel, xmin.item()))
ax2.set_xlim(-0.1, 1.1)
if with_title:
ax2.set_ylabel("Acquisition Function")
ax2.legend(loc=1)
```
Our surrogate model `gpmodel` already has 4 function evaluations at its disposal; however, we have yet to optimize the GP hyperparameters. So we do that first. Then in a loop we call the `next_x` and `update_posterior` functions repeatedly. The following plot illustrates how the Gaussian Process posteriors and the corresponding acquisition functions change at each step in the algorithm. Note how query points are chosen both for exploration and exploitation.
```
plt.figure(figsize=(12, 30))
outer_gs = gridspec.GridSpec(5, 2)
optimizer = torch.optim.Adam(gpmodel.parameters(), lr=0.001)
gp.util.train(gpmodel, optimizer)
for i in range(8):
xmin = next_x()
gs = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=outer_gs[i])
plot(gs, xmin, xlabel=i+1, with_title=(i % 2 == 0))
update_posterior(xmin)
plt.show()
```
Because we have assumed that our observations contain noise, it is improbable that we will find the exact minimizer of the function $f$. Still, with a relatively small budget of evaluations (8) we see that the algorithm has converged very close to the global minimum at $x^* = 0.75725$.
While this tutorial is only intended to be a brief introduction to Bayesian Optimization, we hope that we have been able to convey the basic underlying ideas. Consider watching the lecture by Nando de Freitas [3] for an excellent exposition of the basic theory. Finally, the reference paper [2] gives a review of recent research on Bayesian Optimization, together with many discussions about important technical details.
## References
[1] `Practical bayesian optimization of machine learning algorithms`,<br />
Jasper Snoek, Hugo Larochelle, and Ryan P. Adams
[2] `Taking the human out of the loop: A review of bayesian optimization`,<br />
Bobak Shahriari, Kevin Swersky, Ziyu Wang, Ryan P. Adams, and Nando De Freitas
[3] [Machine learning - Bayesian optimization and multi-armed bandits](https://www.youtube.com/watch?v=vz3D36VXefI)
# Exploratory Data Analysis
In this notebook, I have illustrated some of the strategies that one can use to explore the data and gain some insights about it.
We will start by finding metadata about the data, then determine what techniques to use, and finally extract some important insights about the data. This is based on IBM's Data Analysis with Python course on Coursera.
## The Problem
The problem is to find the variables that impact the car price. For this problem, we will use a real-world dataset that details information about cars.
The dataset used is an open-source dataset made available by Jeffrey C. Schlimmer. The one used in this notebook is hosted on the IBM Cloud. The dataset provides details of some cars, including properties like make, horsepower, price, wheel type and so on.
## Loading data and finding the metadata
Import libraries
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
%matplotlib inline
```
Load the data as pandas dataframe
```
path='https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DA0101EN-SkillsNetwork/labs/Data%20files/automobileEDA.csv'
df = pd.read_csv(path)
df.head()
```
### Metadata: The column types
Finding the column types is an important step. It serves two purposes:
1. See if we need to convert some data. For example, price may be stored as strings instead of numbers. This is very important, as it could throw off everything that we do afterwards.
2. Find out what type of analysis we need to do with which column. After fixing the problems given above, the type of the object is often a great indicator of whether the data is categorical or numerical. This is important as it determines what kind of exploratory analysis we can and want to do.
To find out the type, we can simply use `.dtypes` property of the dataframe. Here's an example using the dataframe we loaded above.
```
df.dtypes
```
From the results above, we can see that we can roughly divide the types into two categories: numeric (int64 and float64) and object. Although the object type can contain lots of things, it is often used to store string variables. A quick glance at the table tells us that there are no glaring errors in the object types.
Now we divide them into two categories: numerical variables and categorical variables. Numerical variables, as the name states, hold numerical data. Categorical variables hold strings that describe a certain property of the data (such as Audi as the make).
Make a special note that our target variable, price, is numerical. So the relationships we would be exploring would be between numerical-and-numerical data and numerical-and-categorical data.
## Relationship between Numerical Data
First we will explore the relationship between two numerical data and see if we can learn some insights out of it.
In the beginning, it's helpful to get the correlation between the variables. For this, we can use the `corr()` method to find out the correlation between all the variables.
Do note that the method computes the Pearson correlation by default. Natively, pandas also supports the Spearman and Kendall Tau correlations, and you can also pass in a custom callable if you want. Check out the docs for more info.
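As a minimal sketch of switching methods (assuming the `df` loaded above and the pandas version used in this notebook), it is just a keyword argument:
```
# Spearman (rank) correlation instead of the default Pearson
df.corr(method='spearman')

# Kendall Tau correlation
df.corr(method='kendall')
```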
Here's how to do it with the dataframe that we have:
```
df.corr()
```
Note that the diagonal elements are always one; because correlation with itself is always one.
Now, it seems somewhat daunting, and frankly unnecessary, to have this big of a table with correlations between things we don't care about (say bore and stroke). If we want to find out the correlation with just price, the `corrwith()` method is helpful.
Here's how to do it:
```
corr = df.corrwith(df['price'])
# Prettify
pd.DataFrame(data=corr.values, index=corr.index, columns=['Correlation'])
```
From the table above, we have some idea about what can we expect the relationship should be like.
As a refresher, in Pearson correlation, values range in [-1, 1] with -1 and 1 implying a perfect linear relationship and 0 implying none. A positive value implies a positive relationship (value increase in response to increment) and negative value implies negative relationship (value decrease in response to increment).
The next step is to have a more visual outlook on the relationship.
### Visualizing Relationships
Continuous numerical variables are variables that may contain any value within some range. In pandas dtype, continuous numerical variables can have the type "int64" or "float64".
Scatterplots are a great way to visualize these variables.
To take it further, it's better to use a scatter plot with a regression line. This should also provide us with some preliminary ways to test our hypothesis about the relationship between the variables.
In this notebook, we would be using the `regplot()` function in the `seaborn` package.
Below are some examples.
#### Positive linear relationship
Let's plot "engine-size" vs "price" since the correlation between them seems strong.
```
plt.figure(figsize=(5,5))
sns.regplot(x="engine-size", y="price", data=df);
```
As the engine size goes up, the price goes up. This indicates a decent positive correlation between these two variables. Thus, we can say that engine size is a good predictor of price, since the regression line is almost a perfect diagonal line.
We can also check this against the Pearson correlation we got above. It's 0.87, which makes sense.
Let's also try highway-mpg, since its correlation with price is about -0.7.
```
sns.regplot(x="highway-mpg", y="price", data=df);
```
The graph shows a decent negative relationship, so it could be a potential indicator. However, it seems that the relationship isn't exactly linear, given the curve of the points.
Let's try a higher order regression line.
```
sns.regplot(x="highway-mpg", y="price", data=df, order=2);
```
There. It seems much better.
### Weak Linear Relationship
Not all variables have to be correlated. Let's check out the graph of "Peak-rpm" as a predictor variable for "price".
```
sns.regplot(x="peak-rpm", y="price", data=df);
```
From the graph, it's clear that peak rpm is a bad indicator of price. There seems to be no relationship between them; the points look almost random.
A quick check of the correlation value confirms this. The value is -0.1, which is very close to zero, implying no relationship.
Although there are cases in which a low value can be misleading, that's usually only for non-linear relationships in which the value goes down and then up. The graph confirms there is none here.
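As a quick sanity check of that caveat, here is a small sketch with synthetic data (using numpy) where a perfect but non-linear relationship still produces a Pearson correlation of roughly zero:
```
import numpy as np

x = np.linspace(-1, 1, 101)
y = x ** 2                  # a perfect, but non-linear, relationship
np.corrcoef(x, y)[0, 1]     # approximately 0
```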
## Relationship between Numerical and Categorical data
Categorical variables, like their name imply, divide the data into certain categories. They essentially describe a 'characteristic' of the data unit, and are often selected from a small group of categories.
Although they commonly have the "object" type, it's possible for them to be "int64" too (for example, 'level of happiness').
### Visualizing with Boxplots
Boxplots are a great way to visualize such relationships, as they show the spread of the data. You can use the `boxplot()` function in the seaborn package. Alternatively, you can use boxen or violin plots too (see the sketch below).
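As a minimal sketch of that alternative (assuming the `df` and seaborn import from above), a violin plot is a one-line change:
```
sns.violinplot(x="body-style", y="price", data=df);
```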
Here's a boxplot example showing the relationship between "body-style" and "price":
```
sns.boxplot(x="body-style", y="price", data=df);
```
We can infer that there is likely no significant relationship, as there is a decent overlap between the distributions.
Let's examine "engine-location" and "price".
```
sns.boxplot(x="engine-location", y="price", data=df);
```
Although there are a lot of outliers for the front, the distribution of price between these two engine-location categories is distinct enough to take engine-location as a potential good predictor of price.
Let's examine "drive-wheels" and "price".
```
sns.boxplot(x="drive-wheels", y="price", data=df);
```
Here we see that the distribution of price between the different drive-wheels categories differs; as such, drive-wheels could potentially be a predictor of price.
### Statistical method for checking a significant relationship - ANOVA
Although visualisation is helpful, it does not give us a concrete and certain answer in this case (and often in others). So it follows that we would want a metric to evaluate the relationship by. For correlation between a categorical and a continuous variable, there are various tests; the ANOVA family of tests is a common choice.
The Analysis of Variance (ANOVA) is a statistical method used to test whether there are significant differences between the means of two or more groups.
Do note that ANOVA is an _omnibus_ test statistic: it can't tell you which groups are correlated, only that there are at least two groups with a significant difference.
In python, we can calculate the ANOVA statistic fairly easily using the `scipy.stats` module. The function `f_oneway()` calculates and returns:
__F-test score__: ANOVA assumes the means of all groups are the same, calculates how much the actual means deviate from that assumption, and reports it as the F-test score. A larger score means there is a larger difference between the means, although the degree of 'largeness' differs from dataset to dataset. You can use the F-table to find the critical F-value (from the significance level and the degrees of freedom of the numerator and denominator) and compare it with the calculated F-test score.
__P-value__: The p-value tells us how statistically significant our calculated score value is.
If the variables are strongly correlated, the expectation is to have ANOVA to return a sizeable F-test score and a small p-value.
#### Drive Wheels
Since ANOVA analyzes the difference between different groups of the same variable, the `groupby()` function will come in handy. With it, we can easily and concisely separate the dataset into groups of drive-wheels. Essentially, the function allows us to split the dataset into groups and perform calculations on those groups moving forward. Check out the Grouping section below for more explanation.
To see if different types of 'drive-wheels' impact 'price', we group the data.
```
grouped_anova = df[['drive-wheels', 'price']].groupby(['drive-wheels'])
grouped_anova.head(2)
```
We can obtain the values of a group using the `get_group()` method.
```
grouped_anova.get_group('4wd')['price']
```
Finally, we use the function `f_oneway()` to obtain the F-test score and P-value.
```
# ANOVA
f_val, p_val = stats.f_oneway(grouped_anova.get_group('fwd')['price'], grouped_anova.get_group('rwd')['price'], grouped_anova.get_group('4wd')['price'])
print( "ANOVA results: F=", f_val, ", P =", p_val)
```
From the result, we can see that we have a large F-test score and a very small p-value. Still, we need to check whether all three tested groups are this highly correlated.
#### Separately: fwd and rwd
```
f_val, p_val = stats.f_oneway(grouped_anova.get_group('fwd')['price'], grouped_anova.get_group('rwd')['price'])
print( "ANOVA results: F=", f_val, ", P =", p_val )
```
It seems the result is significant and they are correlated. Let's examine the other groups.
#### 4wd and rwd
```
f_val, p_val = stats.f_oneway(grouped_anova.get_group('4wd')['price'], grouped_anova.get_group('rwd')['price'])
print( "ANOVA results: F=", f_val, ", P =", p_val)
```
#### 4wd and fwd
```
f_val, p_val = stats.f_oneway(grouped_anova.get_group('4wd')['price'], grouped_anova.get_group('fwd')['price'])
print("ANOVA results: F=", f_val, ", P =", p_val)
```
## Relationship between Categorical Data: Corrected Cramer's V
A good way to test the relationship between two categorical variables is the corrected Cramer's V.
**Note:** A p-value close to zero means that our variables are very unlikely to be completely unassociated in some population. However, this does not mean the variables are strongly associated; a weak association in a large sample size may also result in p = 0.000.
**General Rule of Thumb:**
* V ∈ [0.1,0.3]: weak association
* V ∈ [0.4,0.5]: medium association
* V > 0.5: strong association
Here's how to do it in python:
```python
import scipy.stats as ss
import pandas as pd
import numpy as np
def cramers_corrected_stat(x, y):
""" calculate Cramers V statistic for categorial-categorial association.
uses correction from Bergsma and Wicher,
Journal of the Korean Statistical Society 42 (2013): 323-328
"""
    result = -1
    p = np.nan  # default p-value, in case one of the variables is constant
if len(x.value_counts()) == 1:
print("First variable is constant")
elif len(y.value_counts()) == 1:
print("Second variable is constant")
else:
conf_matrix = pd.crosstab(x, y)
if conf_matrix.shape[0] == 2:
correct = False
else:
correct = True
chi2, p = ss.chi2_contingency(conf_matrix, correction=correct)[0:2]
n = sum(conf_matrix.sum())
phi2 = chi2/n
r, k = conf_matrix.shape
phi2corr = max(0, phi2 - ((k-1)*(r-1))/(n-1))
rcorr = r - ((r-1)**2)/(n-1)
kcorr = k - ((k-1)**2)/(n-1)
result = np.sqrt(phi2corr / min((kcorr-1), (rcorr-1)))
return round(result, 6), round(p, 6)
```
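As a usage sketch (assuming the `df` loaded earlier and the function defined above), we could check the association between two of the categorical columns:
```python
v, p = cramers_corrected_stat(df['drive-wheels'], df['body-style'])
print("Corrected Cramer's V =", v, ", p-value =", p)
```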
## Descriptive Statistical Analysis
Although the insights gained above are significant, it's clear we need more work.
Since we are exploring the data, performing some common and useful descriptive statistical analyses would be nice. However, there are a lot of them, and it would take a lot of work to compute them from scratch. Fortunately, the `pandas` library has a neat method that computes all of them for us.
The `describe()` method, when invoked on a dataframe, automatically computes basic statistics for all continuous variables. Do note that any NaN values are automatically skipped in these statistics. By default, it will show stats for numerical data.
Here's what it will show:
* Count of that variable
* Mean
* Standard Deviation (std)
* Minimum Value
* IQR (Interquartile Range: 25%, 50% and 75%)
* Maximum Value
If you want, you can change the percentiles too. Check out the docs for that.
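For instance, a minimal sketch of requesting custom percentiles (assuming the `df` loaded above):
```
# report the 5th, 50th and 95th percentiles instead of the default quartiles
df.describe(percentiles=[0.05, 0.5, 0.95])
```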
Here's how to do it in our dataframe:
```
df.describe()
```
To get the information about categorical variables, we need to specifically tell it to pandas to include them.
For categorical variables, it shows:
* Count
* Unique values
* The most common value or 'top'
* Frequency of the 'top'
```
df.describe(include=['object'])
```
### Value Counts
Sometimes, we need to understand the distribution of the categorical data. This could mean understanding how many units of each characteristic/variable we have. `value_counts()` is a method in pandas that can help with it. If we use it with a series, it will give us the unique values and how many of them exist.
_Caution:_ Using it with a DataFrame counts unique rows by the combination of all columns (like in SQL). This may or may not be what you want. For example, using it with drive-wheels and engine-location would give you the number of rows for each unique pair of values (see the sketch below).
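That DataFrame behaviour looks roughly like the sketch below (it assumes a pandas version recent enough to provide `DataFrame.value_counts()`):
```
# counts of each unique (drive-wheels, engine-location) pair
df[['drive-wheels', 'engine-location']].value_counts()
```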
Here's an example of doing it with the drive-wheels column.
```
df['drive-wheels'].value_counts().to_frame()
```
`.to_frame()` method is added to make it into a dataframe, hence making it look better.
You can play around and rename the column and index name if you want.
We can repeat the above process for the variable 'engine-location'.
```
df['engine-location'].value_counts().to_frame()
```
Examining the value counts, engine location would not be a good predictor variable for the price: we only have three cars with a rear engine and 198 with an engine in the front, so this result is skewed. Thus, we are not able to draw any conclusions about the engine location.
## Grouping
Grouping is a useful technique to explore the data. With grouping, we can split the data and apply various transforms. For example, we can find out the mean price for different body styles. This helps us gain more insight into whether there's a relationship between our target variable and the variable we are grouping on.
Although often used on categorical data, grouping can also be used with numerical data by separating the values into categories. For example, we might separate cars by price into affordable and luxury groups (see the sketch below).
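A minimal sketch of that binning step uses `pd.cut`; the bin edges and labels below are arbitrary illustrative choices, not part of the dataset:
```
# bin the continuous 'price' column into two coarse groups
price_group = pd.cut(df['price'],
                     bins=[0, 20000, df['price'].max()],
                     labels=['affordable', 'luxury'])
df.groupby(price_group)['price'].mean()
```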
In pandas, we can use the `groupby()` method.
Let's try it with the 'drive-wheels' variable. First we will find out how many unique values there are, using the `unique()` method.
```
df['drive-wheels'].unique()
```
If we want to know, on average, which type of drive wheel is most valuable, we can group "drive-wheels" and then average them.
```
df[['drive-wheels','body-style','price']].groupby(['drive-wheels']).mean()
```
From our data, it seems rear-wheel drive vehicles are, on average, the most expensive, while 4-wheel and front-wheel are approximately the same in price.
It's also possible to group by multiple variables. For example, let's group by both 'drive-wheels' and 'body-style'. This groups the dataframe by the unique combinations of 'drive-wheels' and 'body-style'.
Let's store it in the variable `grouped_by_wheels_and_body`.
```
grouped_by_wheels_and_body = df[['drive-wheels','body-style','price']].groupby(['drive-wheels','body-style']).mean()
grouped_by_wheels_and_body
```
Although incredibly useful, the result is a little hard to read. It's better to convert it to a pivot table.
A pivot table is like an Excel spreadsheet, with one variable along the columns and another along the rows. There are various ways to create one; one is to use the `pivot()` method. However, with groups like the one above (which have a multi-index), we can simply call the `unstack()` method.
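An equivalent sketch that skips the intermediate groupby uses `pd.pivot_table` (assuming the `df` loaded above):
```
pd.pivot_table(df, values='price', index='drive-wheels',
               columns='body-style', aggfunc='mean')
```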
```
grouped_by_wheels_and_body = grouped_by_wheels_and_body.unstack()
grouped_by_wheels_and_body
```
Often, we won't have data for some of the pivot cells. These cells are frequently filled with the value 0, but any other value could potentially be used as well, such as the mean or some other flag.
```
grouped_by_wheels_and_body.fillna(0)
```
Let's do the same for body-style only
```
df[['price', 'body-style']].groupby('body-style').mean()
```
### Visualizing Groups
Heatmaps are a great way to visualize groups. They can show relationships clearly in this case.
Do note that you need to be careful with the color schemes. Choosing an appropriate color scheme is not only important for the 'story' of your data, it also matters because it can impact the perception of the data.
[This resource](https://matplotlib.org/tutorials/colors/colormaps.html) gives a great idea on what to choose as a color scheme and when it's appropriate. It also has samples of the scheme below too for a quick preview along with when should one use them.
Here's an example of using it with the pivot table we created with the `seaborn` package.
```
sns.heatmap(grouped_by_wheels_and_body, cmap="Blues");
```
This heatmap plots the target variable (price) proportional to colour with respect to the variables 'drive-wheel' and 'body-style' in the vertical and horizontal axis respectively. This allows us to visualize how the price is related to 'drive-wheel' and 'body-style'.
## Correlation and Causation
Correlation and causation are terms that are often used and confused with each other, or worse, assumed to imply each other. Here's a quick overview of them:
__Correlation__: The degree of association (or resemblance) of variables with each other.
__Causation__: A relationship of cause and effect between variables.
It is important to know the difference between these two.
Note that correlation does __not__ imply causation.
Determining correlation is much simpler. We can almost always use methods such as Pearson Correlation, ANOVA method, and graphs. Determining causation may require independent experimentation.
### Pearson Correlation
Described earlier, Pearson correlation is a great way to measure linear dependence between two variables. It's also the default method used by `corr()`.
```
df.corr()
```
### Cramer's V
Cramer's V is a great method to calculate the relationship between two categorical variables. Read the Cramer's V section above for more details.
**General Rule of Thumb:**
* V ∈ [0.1,0.3]: weak association
* V ∈ [0.4,0.5]: medium association
* V > 0.5: strong association
### ANOVA Method
As discussed previously, the ANOVA method is great for determining whether there's a significant relationship between a categorical and a continuous variable. Check out the ANOVA section above for more details.
Now, just knowing the correlation statistics is not enough. We also need to know whether the relationship is statistically significant or not. We can use p-value for that.
### P-value
In very simple terms, the p-value estimates the probability that the result we have could be just random chance. For example, for a p-value of 0.05, we accept that our results would be insignificant about 5% of the time and significant 95% of the time.
It's recommended to define a tolerance level of the p-value beforehand. Here's some common interpretations of p-value:
* The p-value is $<$ 0.001: A strong evidence that the correlation is significant.
* The p-value is $<$ 0.05: A moderate evidence that the correlation is significant.
* The p-value is $<$ 0.1: A weak evidence that the correlation is significant.
* The p-value is $>$ 0.1: No evidence that the correlation is significant.
We can obtain this information using `stats` module in the `scipy` library.
Let's calculate it for wheel-base vs price
```
pearson_coef, p_value = stats.pearsonr(df['wheel-base'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P =", p_value)
```
Since the p-value is $<$ 0.001, the correlation between wheel-base and price is statistically significant, although the linear relationship isn't extremely strong (~0.585)
Let's try one more example: horsepower vs price.
```
pearson_coef, p_value = stats.pearsonr(df['horsepower'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P = ", p_value)
```
Since the p-value is $<$ 0.001, the correlation between horsepower and price is statistically significant, and the linear relationship is quite strong (~0.809, close to 1).
### Conclusion: Important Variables
We now have a better idea of what our data looks like and which variables are important to take into account when predicting the car price. After some more analysis, we find that the important variables are:
Continuous numerical variables:
* Length
* Width
* Curb-weight
* Engine-size
* Horsepower
* City-mpg
* Highway-mpg
* Wheel-base
* Bore
Categorical variables:
* Drive-wheels
If needed, we can now move on to building machine learning models, as we now know what to feed our model.
P.S. [This medium article](https://medium.com/@outside2SDs/an-overview-of-correlation-measures-between-categorical-and-continuous-variables-4c7f85610365#:~:text=A%20simple%20approach%20could%20be,variance%20of%20the%20continuous%20variable.&text=If%20the%20variables%20have%20no,similar%20to%20the%20original%20variance) is a great resource that talks about various ways of measuring correlation between categorical and continuous variables.
## Author
By Abhinav Garg
```
import numpy as np
import pandas as pd
import scipy
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from datetime import datetime
%matplotlib inline
import matplotlib
from datetime import datetime
import os
from scipy import stats
from definitions import HUMAN_DATA_DIR, ROOT_DIR
from data.load_from_csv import get_content_datasets
def ClairvoyantCF(test_dataset, train_dataset, answers_dict):
"""Takes datasets and {item_id: True/False} dict and returns
mean mse simply predicting 0/100"""
total_score = 0
for i, rating in enumerate(test_dataset.ratings):
try:
if answers_dict[test_dataset.item_ids[i]]:
total_score += (rating[2] - 1.0)**2
else:
total_score += (rating[2] - 0)**2
except:
print(i, test_dataset.item_ids[i])
mean_mse = total_score / len(test_dataset.ratings)
print("Using Clairvoyant CF, got total val score {:.3f}".format(mean_mse))
return
def ClairvoyantAdjustedCF(test_dataset, train_dataset, answers_dict):
"""Takes datasets and {item_id: True/False} dict and returns
mean mse simply predicting 0/100"""
tot_true = 0
tot_false = 0
true_count = 0
false_count = 0
for i, rating in enumerate(train_dataset.ratings):
if not np.isnan(rating[2]):
if answers_dict[train_dataset.item_ids[i]]:
tot_true += rating[2]
true_count += 1
else:
tot_false += rating[2]
false_count += 1
avg_true = tot_true / true_count
avg_false = tot_false / false_count
total_score = 0
for i, rating in enumerate(test_dataset.ratings):
if answers_dict[test_dataset.item_ids[i]]:
total_score += (rating[2] - avg_true)**2
else:
total_score += (rating[2] - avg_false)**2
mean_mse = total_score / len(test_dataset.ratings)
print("Using Clairvoyant Adjusted CF, got total val score {:.3f}".format(mean_mse))
return
fermi_answers = pd.read_csv(os.path.join(HUMAN_DATA_DIR, 'fermi', 'answers.csv')).drop('Unnamed: 0', axis=1).set_index('item_id').T.to_dict('index')['answer']
politifact_answers = pd.read_csv(os.path.join(HUMAN_DATA_DIR, 'politifact', 'answers.csv')).drop('Unnamed: 0', axis=1).set_index('item_id').T.to_dict('index')['answer']
## Fermi
print('Fermi\nUnmasked:')
unmasked_fermi, unmasked_val_fermi, _ = get_content_datasets(task='fermi', sparsity='unmasked')
ClairvoyantCF(unmasked_val_fermi, unmasked_fermi, fermi_answers)
ClairvoyantAdjustedCF(unmasked_val_fermi, unmasked_fermi, fermi_answers)
print('\nLight Masking:')
light_fermi, unmasked_val_fermi, _ = get_content_datasets(task='fermi', sparsity='light')
ClairvoyantCF(unmasked_val_fermi, light_fermi, fermi_answers)
ClairvoyantAdjustedCF(unmasked_val_fermi, light_fermi, fermi_answers)
print('\nHeavy Masking:')
heavy_fermi, unmasked_val_fermi, _ = get_content_datasets(task='fermi', sparsity='heavy')
ClairvoyantCF(unmasked_val_fermi, heavy_fermi, fermi_answers)
ClairvoyantAdjustedCF(unmasked_val_fermi, heavy_fermi, fermi_answers)
## Politifact
print('Politifact\nUnmasked:')
unmasked_politifact, unmasked_val_politifact, _ = get_content_datasets(task='politifact', sparsity='unmasked')
ClairvoyantCF(unmasked_val_politifact, unmasked_politifact, politifact_answers)
ClairvoyantAdjustedCF(unmasked_val_politifact, unmasked_politifact, politifact_answers)
print('\nPolitifact Masking:')
light_politifact, unmasked_val_politifact, _ = get_content_datasets(task='politifact', sparsity='light')
ClairvoyantCF(unmasked_val_politifact, light_politifact, politifact_answers)
ClairvoyantAdjustedCF(unmasked_val_politifact, light_politifact, politifact_answers)
print('\nPolitifact Masking:')
heavy_politifact, unmasked_val_politifact, _ = get_content_datasets(task='politifact', sparsity='heavy')
ClairvoyantCF(unmasked_val_politifact, heavy_politifact, politifact_answers)
ClairvoyantAdjustedCF(unmasked_val_politifact, heavy_politifact, politifact_answers)
```
# UCI Daphnet dataset (Freezing of gait for Parkinson's disease patients)
```
import numpy as np
import pandas as pd
import os
from typing import List
from pathlib import Path
from config import data_raw_folder, data_processed_folder
from timeeval import Datasets
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (20, 10)
dataset_collection_name = "Daphnet"
source_folder = Path(data_raw_folder) / "UCI ML Repository/Daphnet/dataset"
target_folder = Path(data_processed_folder)
print(f"Looking for source datasets in {source_folder.absolute()} and\nsaving processed datasets in {target_folder.absolute()}")
train_type = "unsupervised"
train_is_normal = False
input_type = "multivariate"
datetime_index = True
dataset_type = "real"
# create target directory
dataset_subfolder = os.path.join(input_type, dataset_collection_name)
target_subfolder = os.path.join(target_folder, dataset_subfolder)
try:
os.makedirs(target_subfolder)
print(f"Created directories {target_subfolder}")
except FileExistsError:
print(f"Directories {target_subfolder} already exist")
pass
dm = Datasets(target_folder)
experiments = [f for f in source_folder.iterdir()]
experiments
columns = ["timestamp", "ankle_horiz_fwd", "ankle_vert", "ankle_horiz_lateral", "leg_horiz_fwd", "leg_vert", "leg_horiz_lateral",
"trunk_horiz_fwd", "trunk_vert", "trunk_horiz_lateral", "is_anomaly"]
def transform_experiment_file(path: Path) -> List[pd.DataFrame]:
df = pd.read_csv(path, sep=" ", header=None)
df.columns = columns
df["timestamp"] = pd.to_datetime(df["timestamp"], unit="ms")
# slice out experiments (0 annotation shows unrelated data points (preparation/briefing/...))
s_group = df["is_anomaly"].isin([1, 2])
s_diff = s_group.shift(-1) - s_group
starts = (df[s_diff == 1].index + 1).values # first point has annotation 0 --> index + 1
ends = df[s_diff == -1].index.values
dfs = []
for start, end in zip(starts, ends):
df1 = df.iloc[start:end].copy()
df1["is_anomaly"] = (df1["is_anomaly"] == 2).astype(int)
dfs.append(df1)
return dfs
for exp in experiments:
# transform file to get datasets
datasets = transform_experiment_file(exp)
for i, df in enumerate(datasets):
# get target filenames
experiment_name = os.path.splitext(exp.name)[0]
dataset_name = f"{experiment_name}E{i}"
filename = f"{dataset_name}.test.csv"
path = os.path.join(dataset_subfolder, filename)
target_filepath = os.path.join(target_subfolder, filename)
# calc length and save in file
dataset_length = len(df)
df.to_csv(target_filepath, index=False)
print(f"Processed source dataset {exp} -> {target_filepath}")
# save metadata
dm.add_dataset((dataset_collection_name, dataset_name),
train_path = None,
test_path = path,
dataset_type = dataset_type,
datetime_index = datetime_index,
split_at = None,
train_type = train_type,
train_is_normal = train_is_normal,
input_type = input_type,
dataset_length = dataset_length
)
dm.save()
dm.refresh()
dm.df().loc[(slice(dataset_collection_name,dataset_collection_name), slice(None))]
```
## Experimentation
Annotations
- `0`: not part of the experiment.
For instance the sensors are installed on the user or the user is performing activities unrelated to the experimental protocol, such as debriefing
- `1`: experiment, no freeze (can be any of stand, walk, turn)
- `2`: freeze
```
columns = ["timestamp", "ankle_horiz_fwd", "ankle_vert", "ankle_horiz_lateral", "leg_horiz_fwd", "leg_vert", "leg_horiz_lateral",
"trunk_horiz_fwd", "trunk_vert", "trunk_horiz_lateral", "annotation"]
df1 = pd.read_csv(source_folder / "S01R01.txt", sep=' ', header=None)
df1.columns = columns
df1["timestamp"] = pd.to_datetime(df1["timestamp"], unit="ms")
df1
columns = [c for c in columns if c not in ["timestamp", "annotation"]]
df_plot = df1.set_index("timestamp", drop=True)#.loc["1970-01-01 00:15:00":"1970-01-01 00:16:00"]
df_plot.plot(y=columns, figsize=(20,10))
df_plot["annotation"].plot(secondary_y=True)
plt.legend()
plt.show()
s_group = df1["annotation"].isin([1, 2])
s_diff = s_group.shift(-1) - s_group
starts = (df1[s_diff == 1].index + 1).values
ends = df1[s_diff == -1].index.values
starts, ends
dfs = [df1.iloc[start:end] for start, end in zip(starts, ends)]
len(dfs)
columns = [c for c in columns if c not in ["timestamp", "annotation"]]
for df in dfs:
df = df.set_index("timestamp", drop=True)
df.plot(y=columns, figsize=(20,10))
df["annotation"].plot(secondary_y=True)
plt.show()
```
# SDLib
> Shilling simulated attacks and detection methods
## Setup
```
!mkdir -p results
```
### Imports
```
from collections import defaultdict
import numpy as np
import random
import os
import os.path
from os.path import abspath
from os import makedirs,remove
from re import compile,findall,split
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics.pairwise import pairwise_distances,cosine_similarity
from numpy.linalg import norm
from scipy.stats.stats import pearsonr
from math import sqrt,exp
import sys
from re import split
from multiprocessing import Process,Manager
from time import strftime,localtime,time
import re
from os.path import abspath
from time import strftime,localtime,time
from sklearn.metrics import classification_report
from re import split
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from random import shuffle
from sklearn.tree import DecisionTreeClassifier
import time as tm
from sklearn.metrics import classification_report
import numpy as np
from collections import defaultdict
from math import log,exp
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from random import choice
import matplotlib
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import random
from sklearn.metrics import classification_report
import numpy as np
from collections import defaultdict
from math import log,exp
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn import preprocessing
from sklearn import metrics
import scipy
from scipy.sparse import csr_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
import math
from sklearn.naive_bayes import GaussianNB
```
## Data
```
!mkdir -p dataset/amazon
!cd dataset/amazon && wget -q --show-progress https://github.com/Coder-Yu/SDLib/raw/master/dataset/amazon/profiles.txt
!cd dataset/amazon && wget -q --show-progress https://github.com/Coder-Yu/SDLib/raw/master/dataset/amazon/labels.txt
!mkdir -p dataset/averageattack
!cd dataset/averageattack && wget -q --show-progress https://github.com/Coder-Yu/SDLib/raw/master/dataset/averageattack/ratings.txt
!cd dataset/averageattack && wget -q --show-progress https://github.com/Coder-Yu/SDLib/raw/master/dataset/averageattack/labels.txt
!mkdir -p dataset/filmtrust
!cd dataset/filmtrust && wget -q --show-progress https://github.com/Coder-Yu/SDLib/raw/master/dataset/filmtrust/ratings.txt
!cd dataset/filmtrust && wget -q --show-progress https://github.com/Coder-Yu/SDLib/raw/master/dataset/filmtrust/trust.txt
```
## Config
### Configure the Detection Method
<div>
<table class="table table-hover table-bordered">
<tr>
<th width="12%" scope="col"> Entry</th>
<th width="16%" class="conf" scope="col">Example</th>
<th width="72%" class="conf" scope="col">Description</th>
</tr>
<tr>
<td>ratings</td>
<td>dataset/averageattack/ratings.txt</td>
<td>Set the path to the dirty recommendation dataset. Format: each row separated by empty, tab or comma symbol. </td>
</tr>
<tr>
<td>label</td>
<td>dataset/averageattack/labels.txt</td>
<td>Set the path to labels (for users). Format: each row separated by empty, tab or comma symbol. </td>
</tr>
<tr>
<td scope="row">ratings.setup</td>
<td>-columns 0 1 2</td>
<td>-columns: (user, item, rating) columns of rating data are used;
-header: to skip the first head line when reading data<br>
</td>
</tr>
<tr>
<td scope="row">MethodName</td>
<td>DegreeSAD/PCASelect/etc.</td>
<td>The name of the detection method<br>
</td>
</tr>
<tr>
<td scope="row">evaluation.setup</td>
<td>-testSet dataset/testset.txt</td>
<td>Main option: -testSet, -ap, -cv <br>
-testSet path/to/test/file (need to specify the test set manually)<br>
-ap ratio (ap means that the user set (including items and ratings) are automatically partitioned into training set and test set, the number is the ratio of test set. e.g. -ap 0.2)<br>
-cv k (-cv means cross validation, k is the number of the fold. e.g. -cv 5)<br>
</td>
</tr>
<tr>
<td scope="row">output.setup</td>
<td>on -dir Results/</td>
<td>Main option: whether to output recommendation results<br>
-dir path: the directory path of output results.
</td>
</tr>
</table>
</div>
### Configure the Shilling Model
<div>
<table class="table table-hover table-bordered">
<tr>
<th width="12%" scope="col"> Entry</th>
<th width="16%" class="conf" scope="col">Example</th>
<th width="72%" class="conf" scope="col">Description</th>
</tr>
<tr>
<td>ratings</td>
<td>dataset/averageattack/ratings.txt</td>
<td>Set the path to the recommendation dataset. Format: each row separated by empty, tab or comma symbol. </td>
</tr>
<tr>
<td scope="row">ratings.setup</td>
<td>-columns 0 1 2</td>
<td>-columns: (user, item, rating) columns of rating data are used;
-header: to skip the first head line when reading data<br>
</td>
</tr>
<tr>
<td>attackSize</td>
<td>0.01</td>
<td>The ratio of the injected spammers to genuine users</td>
</tr>
<tr>
<td>fillerSize</td>
<td>0.01</td>
<td>The ratio of the filler items to all items </td>
</tr>
<tr>
<td>selectedSize</td>
<td>0.001</td>
<td>The ratio of the selected items to all items </td>
</tr>
<tr>
<td>linkSize</td>
<td>0.01</td>
<td>The ratio of the users maliciously linked by a spammer to all user </td>
</tr>
<tr>
<td>targetCount</td>
<td>20</td>
<td>The count of the targeted items </td>
</tr>
<tr>
<td>targetScore</td>
<td>5.0</td>
<td>The score given to the target items</td>
</tr>
<tr>
<td>threshold</td>
<td>3.0</td>
<td>Item has an average score lower than threshold may be chosen as one of the target items</td>
</tr>
<tr>
<td>minCount</td>
<td>3</td>
<td>Item has a ratings count larger than minCount may be chosen as one of the target items</td>
</tr>
<tr>
<td>maxCount</td>
<td>50</td>
<td>Item has a rating count smaller that maxCount may be chosen as one of the target items</td>
</tr>
<tr>
<td scope="row">outputDir</td>
<td>data/</td>
<td> User profiles and labels will be output here </td>
</tr>
</table>
</div>
```
%%writefile BayesDetector.conf
ratings=dataset/amazon/profiles.txt
ratings.setup=-columns 0 1 2
label=dataset/amazon/labels.txt
methodName=BayesDetector
evaluation.setup=-cv 5
item.ranking=off -topN 50
num.max.iter=100
learnRate=-init 0.03 -max 0.1
reg.lambda=-u 0.3 -i 0.3
BayesDetector=-k 10 -negCount 256 -gamma 1 -filter 4 -delta 0.01
output.setup=on -dir results/
%%writefile CoDetector.conf
ratings=dataset/amazon/profiles.txt
ratings.setup=-columns 0 1 2
label=dataset/amazon/labels.txt
methodName=CoDetector
evaluation.setup=-ap 0.3
item.ranking=on -topN 50
num.max.iter=200
learnRate=-init 0.01 -max 0.01
reg.lambda=-u 0.8 -i 0.4
CoDetector=-k 10 -negCount 256 -gamma 1 -filter 4
output.setup=on -dir results/amazon/
%%writefile DegreeSAD.conf
ratings=dataset/amazon/profiles.txt
ratings.setup=-columns 0 1 2
label=dataset/amazon/labels.txt
methodName=DegreeSAD
evaluation.setup=-cv 5
output.setup=on -dir results/
%%writefile FAP.conf
ratings=dataset/averageattack/ratings.txt
ratings.setup=-columns 0 1 2
label=dataset/averageattack/labels.txt
methodName=FAP
evaluation.setup=-ap 0.000001
seedUser=350
topKSpam=1557
output.setup=on -dir results/
%%writefile PCASelectUsers.conf
ratings=dataset/averageattack/ratings.txt
ratings.setup=-columns 0 1 2
label=dataset/averageattack/labels.txt
methodName=PCASelectUsers
evaluation.setup=-ap 0.00001
kVals=3
attackSize=0.1
output.setup=on -dir results/
%%writefile SemiSAD.conf
ratings=dataset/averageattack/ratings.txt
ratings.setup=-columns 0 1 2
label=dataset/averageattack/labels.txt
methodName=SemiSAD
evaluation.setup=-ap 0.2
Lambda=0.5
topK=28
output.setup=on -dir results/
```
## Baseclass
```
class SDetection(object):
def __init__(self,conf,trainingSet=None,testSet=None,labels=None,fold='[1]'):
self.config = conf
self.isSave = False
self.isLoad = False
self.foldInfo = fold
self.labels = labels
self.dao = RatingDAO(self.config, trainingSet, testSet)
self.training = []
self.trainingLabels = []
self.test = []
self.testLabels = []
def readConfiguration(self):
self.algorName = self.config['methodName']
self.output = LineConfig(self.config['output.setup'])
def printAlgorConfig(self):
"show algorithm's configuration"
print('Algorithm:',self.config['methodName'])
print('Ratings dataSet:',abspath(self.config['ratings']))
if LineConfig(self.config['evaluation.setup']).contains('-testSet'):
print('Test set:',abspath(LineConfig(self.config['evaluation.setup']).getOption('-testSet')))
#print 'Count of the users in training set: ',len()
print('Training set size: (user count: %d, item count %d, record count: %d)' %(self.dao.trainingSize()))
print('Test set size: (user count: %d, item count %d, record count: %d)' %(self.dao.testSize()))
print('='*80)
def initModel(self):
pass
def buildModel(self):
pass
def saveModel(self):
pass
def loadModel(self):
pass
def predict(self):
pass
def execute(self):
self.readConfiguration()
if self.foldInfo == '[1]':
self.printAlgorConfig()
# load model from disk or build model
if self.isLoad:
print('Loading model %s...' % (self.foldInfo))
self.loadModel()
else:
print('Initializing model %s...' % (self.foldInfo))
self.initModel()
print('Building Model %s...' % (self.foldInfo))
self.buildModel()
        # predict the labels
print('Predicting %s...' % (self.foldInfo))
prediction = self.predict()
report = classification_report(self.testLabels, prediction, digits=4)
        currentTime = strftime("%Y-%m-%d %H-%M-%S", localtime(time()))
FileIO.writeFile(self.output['-dir'],self.algorName+'@'+currentTime+self.foldInfo,report)
# save model
if self.isSave:
print('Saving model %s...' % (self.foldInfo))
self.saveModel()
print(report)
return report
class SSDetection(SDetection):
def __init__(self,conf,trainingSet=None,testSet=None,labels=None,relation=list(),fold='[1]'):
super(SSDetection, self).__init__(conf,trainingSet,testSet,labels,fold)
self.sao = SocialDAO(self.config, relation) # social relations access control
```
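The `execute` method above fixes the detector lifecycle: read the configuration, initialise and build the model, predict labels for the test users, and write a classification report. Below is a minimal sketch of a custom detector plugging into that lifecycle; the class name `RatingCountDetector` and its single feature (the number of ratings per user) are illustrative assumptions, not part of the library.
```
# A toy detector that follows the SDetection lifecycle (illustrative only).
from sklearn.tree import DecisionTreeClassifier

class RatingCountDetector(SDetection):
    def buildModel(self):
        # one feature per user: how many items they rated
        for user in self.dao.trainingSet_u:
            self.training.append([len(self.dao.trainingSet_u[user])])
            self.trainingLabels.append(self.labels[user])
        for user in self.dao.testSet_u:
            self.test.append([len(self.dao.testSet_u[user])])
            self.testLabels.append(self.labels[user])
    def predict(self):
        # any scikit-learn classifier works here; execute() scores the result
        classifier = DecisionTreeClassifier(criterion='entropy')
        classifier.fit(self.training, self.trainingLabels)
        return classifier.predict(self.test)
```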
## Utils
```
class Config(object):
def __init__(self,fileName):
self.config = {}
self.readConfiguration(fileName)
def __getitem__(self, item):
if not self.contains(item):
print('parameter '+item+' is invalid!')
exit(-1)
return self.config[item]
def getOptions(self,item):
if not self.contains(item):
print('parameter '+item+' is invalid!')
exit(-1)
return self.config[item]
def contains(self,key):
return key in self.config
def readConfiguration(self,fileName):
if not os.path.exists(abspath(fileName)):
print('config file is not found!')
raise IOError
with open(fileName) as f:
for ind,line in enumerate(f):
if line.strip()!='':
try:
key,value=line.strip().split('=')
self.config[key]=value
except ValueError:
print('config file is not in the correct format! Error Line:%d'%(ind))
class LineConfig(object):
def __init__(self,content):
self.line = content.strip().split(' ')
self.options = {}
self.mainOption = False
if self.line[0] == 'on':
self.mainOption = True
elif self.line[0] == 'off':
self.mainOption = False
for i,item in enumerate(self.line):
if (item.startswith('-') or item.startswith('--')) and not item[1:].isdigit():
ind = i+1
for j,sub in enumerate(self.line[ind:]):
if (sub.startswith('-') or sub.startswith('--')) and not sub[1:].isdigit():
ind = j
break
if j == len(self.line[ind:])-1:
ind=j+1
break
try:
self.options[item] = ' '.join(self.line[i+1:i+1+ind])
except IndexError:
self.options[item] = 1
def __getitem__(self, item):
if not self.contains(item):
print('parameter '+item+' is invalid!')
exit(-1)
return self.options[item]
def getOption(self,key):
if not self.contains(key):
print('parameter '+key+' is invalid!')
exit(-1)
return self.options[key]
def isMainOn(self):
return self.mainOption
def contains(self,key):
return key in self.options
class FileIO(object):
def __init__(self):
pass
@staticmethod
def writeFile(dir,file,content,op = 'w'):
if not os.path.exists(dir):
os.makedirs(dir)
        if isinstance(content, str):
with open(dir + file, op) as f:
f.write(content)
else:
with open(dir+file,op) as f:
f.writelines(content)
@staticmethod
def deleteFile(filePath):
if os.path.exists(filePath):
remove(filePath)
@staticmethod
def loadDataSet(conf, file, bTest=False):
trainingData = defaultdict(dict)
testData = defaultdict(dict)
ratingConfig = LineConfig(conf['ratings.setup'])
if not bTest:
print('loading training data...')
else:
print('loading test data...')
with open(file) as f:
ratings = f.readlines()
# ignore the headline
if ratingConfig.contains('-header'):
ratings = ratings[1:]
# order of the columns
order = ratingConfig['-columns'].strip().split()
for lineNo, line in enumerate(ratings):
items = split(' |,|\t', line.strip())
if not bTest and len(order) < 3:
print('The rating file is not in a correct format. Error: Line num %d' % lineNo)
exit(-1)
try:
userId = items[int(order[0])]
itemId = items[int(order[1])]
if bTest and len(order)<3:
rating = 1 #default value
else:
rating = items[int(order[2])]
except ValueError:
                print('Error! Have you added the option -header to the ratings.setup?')
exit(-1)
if not bTest:
trainingData[userId][itemId]=float(rating)
else:
testData[userId][itemId] = float(rating)
if not bTest:
return trainingData
else:
return testData
@staticmethod
def loadRelationship(conf, filePath):
socialConfig = LineConfig(conf['social.setup'])
relation = []
print('loading social data...')
with open(filePath) as f:
relations = f.readlines()
# ignore the headline
if socialConfig.contains('-header'):
relations = relations[1:]
# order of the columns
order = socialConfig['-columns'].strip().split()
if len(order) <= 2:
print('The social file is not in a correct format.')
for lineNo, line in enumerate(relations):
items = split(' |,|\t', line.strip())
if len(order) < 2:
print('The social file is not in a correct format. Error: Line num %d' % lineNo)
exit(-1)
userId1 = items[int(order[0])]
userId2 = items[int(order[1])]
if len(order) < 3:
weight = 1
else:
weight = float(items[int(order[2])])
relation.append([userId1, userId2, weight])
return relation
@staticmethod
def loadLabels(filePath):
labels = {}
with open(filePath) as f:
for line in f:
items = split(' |,|\t', line.strip())
labels[items[0]] = items[1]
return labels
class DataSplit(object):
def __init__(self):
pass
@staticmethod
def dataSplit(data,test_ratio = 0.3,output=False,path='./',order=1):
if test_ratio>=1 or test_ratio <=0:
test_ratio = 0.3
testSet = {}
trainingSet = {}
for user in data:
if random.random() < test_ratio:
testSet[user] = data[user].copy()
else:
trainingSet[user] = data[user].copy()
if output:
FileIO.writeFile(path,'testSet['+str(order)+']',testSet)
FileIO.writeFile(path, 'trainingSet[' + str(order) + ']', trainingSet)
return trainingSet,testSet
@staticmethod
def crossValidation(data,k,output=False,path='./',order=1):
if k<=1 or k>10:
k=3
for i in range(k):
trainingSet = {}
testSet = {}
for ind,user in enumerate(data):
if ind%k == i:
testSet[user] = data[user].copy()
else:
trainingSet[user] = data[user].copy()
yield trainingSet,testSet
def drawLine(x,y,labels,xLabel,yLabel,title):
f, ax = plt.subplots(1, 1, figsize=(10, 6), sharex=True)
#f.tight_layout()
#sns.set(style="darkgrid")
palette = ['blue','orange','red','green','purple','pink']
# for i in range(len(ax)):
# x1 = range(0, len(x))
#ax.set_xlim(min(x1)-0.2,max(x1)+0.2)
# mini = 10000;max = -10000
# for label in labels:
# if mini>min(y[i][label]):
# mini = min(y[i][label])
# if max<max(y[i][label]):
# max = max(y[i][label])
# ax[i].set_ylim(mini-0.25*(max-mini),max+0.25*(max-mini))
# for j,label in enumerate(labels):
# if j%2==1:
# ax[i].plot(x1, y[i][label], color=palette[j/2], marker='.', label=label, markersize=12)
# else:
# ax[i].plot(x1, y[i][label], color=palette[j/2], marker='.', label=label,markersize=12,linestyle='--')
# ax[0].set_ylabel(yLabel,fontsize=20)
for xdata,ydata,lab,c in zip(x,y,labels,palette):
ax.plot(xdata,ydata,color = c,label=lab)
ind = np.arange(0,60,10)
ax.set_xticks(ind)
#ax.set_xticklabels(x)
ax.set_xlabel(xLabel, fontsize=20)
ax.set_ylabel(yLabel, fontsize=20)
ax.tick_params(labelsize=16)
#ax.tick_params(axs='y', labelsize=20)
ax.set_title(title,fontsize=24)
plt.grid(True)
handles, labels1 = ax.get_legend_handles_labels()
#ax[i].legend(handles, labels1, loc=2, fontsize=20)
# ax.legend(loc=2,
# ncol=6, borderaxespad=0.,fontsize=20)
#ax[2].legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,fontsize=20)
ax.legend(loc='upper right',fontsize=20,shadow=True)
plt.show()
plt.close()
paths = ['SVD.txt','PMF.txt','EE.txt','RDML.txt']
files = ['EE['+str(i)+'] iteration.txt' for i in range(2,9)]
x = []
y = []
data = []
def normalize():
for file in files:
xdata = []
with open(file) as f:
for line in f:
items = line.strip().split()
rmse = items[2].split(':')[1]
xdata.append(float(rmse))
data.append(xdata)
average = []
for i in range(len(data[0])):
total = 0
for k in range(len(data)):
total += data[k][i]
average.append(str(i+1)+':'+str(float(total)/len(data))+'\n')
with open('EE.txt','w') as f:
f.writelines(average)
def readData():
for file in paths:
xdata = []
ydata = []
with open(file) as f:
for line in f:
items = line.strip().split(':')
xdata.append(int(items[0]))
rmse = float(items[1])
ydata.append(float(rmse))
x.append(xdata)
y.append(ydata)
# x = [[1,2,3],[1,2,3]]
# y = [[1,2,3],[4,5,6]]
#normalize()
readData()
labels = ['SVD','PMF','EE','RDML',]
xlabel = 'Iteration'
ylabel = 'RMSE'
drawLine(x,y,labels,xlabel,ylabel,'')
def l1(x):
return norm(x,ord=1)
def l2(x):
return norm(x)
def common(x1,x2):
# find common ratings
common = (x1!=0)&(x2!=0)
new_x1 = x1[common]
new_x2 = x2[common]
return new_x1,new_x2
def cosine_sp(x1,x2):
'x1,x2 are dicts,this version is for sparse representation'
total = 0
denom1 = 0
denom2 =0
for k in x1:
if k in x2:
total+=x1[k]*x2[k]
denom1+=x1[k]**2
denom2+=x2[k]**2
try:
return (total + 0.0) / (sqrt(denom1) * sqrt(denom2))
except ZeroDivisionError:
return 0
def cosine(x1,x2):
#find common ratings
new_x1, new_x2 = common(x1,x2)
#compute the cosine similarity between two vectors
sum = new_x1.dot(new_x2)
denom = sqrt(new_x1.dot(new_x1)*new_x2.dot(new_x2))
try:
return float(sum)/denom
except ZeroDivisionError:
return 0
#return cosine_similarity(x1,x2)[0][0]
def pearson_sp(x1,x2):
total = 0
denom1 = 0
denom2 = 0
overlapped=False
try:
mean1 = sum(x1.values())/(len(x1)+0.0)
mean2 = sum(x2.values()) / (len(x2) + 0.0)
for k in x1:
if k in x2:
total += (x1[k]-mean1) * (x2[k]-mean2)
denom1 += (x1[k]-mean1) ** 2
denom2 += (x2[k]-mean2) ** 2
overlapped=True
return (total + 0.0) / (sqrt(denom1) * sqrt(denom2))
except ZeroDivisionError:
if overlapped:
return 1
else:
return 0
def euclidean(x1,x2):
#find common ratings
new_x1, new_x2 = common(x1, x2)
#compute the euclidean between two vectors
diff = new_x1-new_x2
denom = sqrt((diff.dot(diff)))
try:
return 1/denom
except ZeroDivisionError:
return 0
def pearson(x1,x2):
#find common ratings
new_x1, new_x2 = common(x1, x2)
#compute the pearson similarity between two vectors
ind1 = new_x1 > 0
ind2 = new_x2 > 0
try:
mean_x1 = float(new_x1.sum())/ind1.sum()
mean_x2 = float(new_x2.sum())/ind2.sum()
new_x1 = new_x1 - mean_x1
new_x2 = new_x2 - mean_x2
sum = new_x1.dot(new_x2)
denom = sqrt((new_x1.dot(new_x1))*(new_x2.dot(new_x2)))
return float(sum) / denom
except ZeroDivisionError:
return 0
def similarity(x1,x2,sim):
if sim == 'pcc':
return pearson_sp(x1,x2)
if sim == 'euclidean':
return euclidean(x1,x2)
else:
return cosine_sp(x1, x2)
def normalize(vec,maxVal,minVal):
'get the normalized value using min-max normalization'
if maxVal > minVal:
return float(vec-minVal)/(maxVal-minVal)+0.01
elif maxVal==minVal:
return vec/maxVal
else:
print('error... maximum value is less than minimum value.')
raise ArithmeticError
def sigmoid(val):
return 1/(1+exp(-val))
def denormalize(vec,maxVal,minVal):
return minVal+(vec-0.01)*(maxVal-minVal)
```
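Since every option string in the configuration files is parsed by `LineConfig`, a short sketch of how a typical line is split may help; the option strings below are taken from the example configurations above.
```
# Illustrative parsing of two option strings with the LineConfig class above.
out = LineConfig('on -dir results/')
print(out.isMainOn())         # True  -> the main switch is 'on'
print(out.getOption('-dir'))  # 'results/'

ev = LineConfig('-ap 0.3')
print(ev.contains('-cv'))     # False
print(ev['-ap'])              # '0.3' (option values are returned as strings)
```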
## Shilling models
### Attack base class
```
class Attack(object):
def __init__(self,conf):
self.config = Config(conf)
self.userProfile = FileIO.loadDataSet(self.config,self.config['ratings'])
self.itemProfile = defaultdict(dict)
self.attackSize = float(self.config['attackSize'])
self.fillerSize = float(self.config['fillerSize'])
self.selectedSize = float(self.config['selectedSize'])
self.targetCount = int(self.config['targetCount'])
self.targetScore = float(self.config['targetScore'])
self.threshold = float(self.config['threshold'])
self.minCount = int(self.config['minCount'])
self.maxCount = int(self.config['maxCount'])
self.minScore = float(self.config['minScore'])
self.maxScore = float(self.config['maxScore'])
self.outputDir = self.config['outputDir']
if not os.path.exists(self.outputDir):
os.makedirs(self.outputDir)
for user in self.userProfile:
for item in self.userProfile[user]:
self.itemProfile[item][user] = self.userProfile[user][item]
self.spamProfile = defaultdict(dict)
self.spamItem = defaultdict(list) #items rated by spammers
self.targetItems = []
self.itemAverage = {}
self.getAverageRating()
self.selectTarget()
self.startUserID = 0
def getAverageRating(self):
for itemID in self.itemProfile:
li = list(self.itemProfile[itemID].values())
self.itemAverage[itemID] = float(sum(li)) / len(li)
def selectTarget(self,):
print('Selecting target items...')
print('-'*80)
print('Target item Average rating of the item')
itemList = list(self.itemProfile.keys())
itemList.sort()
while len(self.targetItems) < self.targetCount:
target = np.random.randint(len(itemList)) #generate a target order at random
if len(self.itemProfile[str(itemList[target])]) < self.maxCount and len(self.itemProfile[str(itemList[target])]) > self.minCount \
and str(itemList[target]) not in self.targetItems \
and self.itemAverage[str(itemList[target])] <= self.threshold:
self.targetItems.append(str(itemList[target]))
print(str(itemList[target]),' ',self.itemAverage[str(itemList[target])])
def getFillerItems(self):
mu = int(self.fillerSize*len(self.itemProfile))
sigma = int(0.1*mu)
markedItemsCount = abs(int(round(random.gauss(mu, sigma))))
markedItems = np.random.randint(len(self.itemProfile), size=markedItemsCount)
return markedItems.tolist()
def insertSpam(self,startID=0):
pass
def loadTarget(self,filename):
with open(filename) as f:
for line in f:
self.targetItems.append(line.strip())
def generateLabels(self,filename):
labels = []
path = self.outputDir + filename
with open(path,'w') as f:
for user in self.spamProfile:
labels.append(user+' 1\n')
for user in self.userProfile:
labels.append(user+' 0\n')
f.writelines(labels)
        print('User labels have been output to '+abspath(self.config['outputDir'])+'.')
def generateProfiles(self,filename):
ratings = []
path = self.outputDir+filename
with open(path, 'w') as f:
for user in self.userProfile:
for item in self.userProfile[user]:
ratings.append(user+' '+item+' '+str(self.userProfile[user][item])+'\n')
for user in self.spamProfile:
for item in self.spamProfile[user]:
ratings.append(user + ' ' + item + ' ' + str(self.spamProfile[user][item])+'\n')
f.writelines(ratings)
        print('User profiles have been output to '+abspath(self.config['outputDir'])+'.')
```
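The number of filler items per spam profile is drawn in `getFillerItems` from a Gaussian centred on `fillerSize * |items|`, with a standard deviation of 10% of that mean. A rough numeric sketch using invented values (`fillerSize = 0.05` and a catalogue of 2000 items):
```
import random

# Invented numbers, only to show the scale of the filler profiles.
fillerSize, itemCount = 0.05, 2000
mu = int(fillerSize * itemCount)   # 100 filler items on average
sigma = int(0.1 * mu)              # spread of 10 items
samples = [abs(int(round(random.gauss(mu, sigma)))) for _ in range(5)]
print(mu, sigma, samples)          # sampled counts vary from run to run
```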
### Relation attack
```
class RelationAttack(Attack):
def __init__(self,conf):
super(RelationAttack, self).__init__(conf)
self.spamLink = defaultdict(list)
self.relation = FileIO.loadRelationship(self.config,self.config['social'])
self.trustLink = defaultdict(list)
self.trusteeLink = defaultdict(list)
for u1,u2,t in self.relation:
self.trustLink[u1].append(u2)
self.trusteeLink[u2].append(u1)
        self.activeUser = {}  # normal users who follow the spam users
        self.linkedUser = {}  # users to whom the spammers have planted links
# def reload(self):
# super(RelationAttack, self).reload()
# self.spamLink = defaultdict(list)
# self.trustLink, self.trusteeLink = loadTrusts(self.config['social'])
    # self.activeUser = {}  # normal users who follow the spam users
    # self.linkedUser = {}  # users to whom the spammers have planted links
def farmLink(self):
pass
def getReciprocal(self,target):
        # probability that the current target user follows the spammer back, based on the overlap between followers and followees
reciprocal = float(2 * len(set(self.trustLink[target]).intersection(self.trusteeLink[target])) + 0.1) \
/ (len(set(self.trustLink[target]).union(self.trusteeLink[target])) + 1)
reciprocal += (len(self.trustLink[target]) + 0.1) / (len(self.trustLink[target]) + len(self.trusteeLink[target]) + 1)
reciprocal /= 2
return reciprocal
def generateSocialConnections(self,filename):
relations = []
path = self.outputDir + filename
with open(path, 'w') as f:
for u1 in self.trustLink:
for u2 in self.trustLink[u1]:
relations.append(u1 + ' ' + u2 + ' 1\n')
for u1 in self.spamLink:
for u2 in self.spamLink[u1]:
relations.append(u1 + ' ' + u2 + ' 1\n')
f.writelines(relations)
print('Social relations have been output to ' + abspath(self.config['outputDir']) + '.')
```
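`getReciprocal` turns the overlap between a user's followees (`trustLink`) and followers (`trusteeLink`) into the probability that the user follows the spammer back. A worked example with invented link sets:
```
# Invented follower/followee sets for one target user.
followees = {'u1', 'u2', 'u3'}        # trustLink[target]
followers = {'u2', 'u3', 'u4', 'u5'}  # trusteeLink[target]

overlap = followees & followers
union = followees | followers
reciprocal = (2 * len(overlap) + 0.1) / (len(union) + 1)
reciprocal += (len(followees) + 0.1) / (len(followees) + len(followers) + 1)
reciprocal /= 2
print(round(reciprocal, 3))  # ~0.535, compared against a uniform random draw in farmLink
```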
### Random relation attack
```
class RandomRelationAttack(RelationAttack):
def __init__(self,conf):
super(RandomRelationAttack, self).__init__(conf)
self.scale = float(self.config['linkSize'])
    def farmLink(self):  # randomly inject fake relations
for spam in self.spamProfile:
            # plant links to the users who rated the target items
for item in self.spamItem[spam]:
if random.random() < 0.01:
for target in self.itemProfile[item]:
self.spamLink[spam].append(target)
response = np.random.random()
reciprocal = self.getReciprocal(target)
if response <= reciprocal:
self.trustLink[target].append(spam)
self.activeUser[target] = 1
else:
self.linkedUser[target] = 1
            # plant links to the other users with probability scale
for user in self.userProfile:
if random.random() < self.scale:
self.spamLink[spam].append(user)
response = np.random.random()
reciprocal = self.getReciprocal(user)
if response < reciprocal:
self.trustLink[user].append(spam)
self.activeUser[user] = 1
else:
self.linkedUser[user] = 1
```
### Random attack
```
class RandomAttack(Attack):
def __init__(self,conf):
super(RandomAttack, self).__init__(conf)
def insertSpam(self,startID=0):
print('Modeling random attack...')
itemList = list(self.itemProfile.keys())
if startID == 0:
self.startUserID = len(self.userProfile)
else:
self.startUserID = startID
for i in range(int(len(self.userProfile)*self.attackSize)):
            # filler items
fillerItems = self.getFillerItems()
for item in fillerItems:
self.spamProfile[str(self.startUserID)][str(itemList[item])] = random.randint(self.minScore,self.maxScore)
            # target items
for j in range(self.targetCount):
target = np.random.randint(len(self.targetItems))
self.spamProfile[str(self.startUserID)][self.targetItems[target]] = self.targetScore
self.spamItem[str(self.startUserID)].append(self.targetItems[target])
self.startUserID += 1
class RR_Attack(RandomRelationAttack,RandomAttack):
def __init__(self,conf):
super(RR_Attack, self).__init__(conf)
```
### Average attack
```
class AverageAttack(Attack):
def __init__(self,conf):
super(AverageAttack, self).__init__(conf)
def insertSpam(self,startID=0):
print('Modeling average attack...')
itemList = list(self.itemProfile.keys())
if startID == 0:
self.startUserID = len(self.userProfile)
else:
self.startUserID = startID
for i in range(int(len(self.userProfile)*self.attackSize)):
#fill
fillerItems = self.getFillerItems()
for item in fillerItems:
self.spamProfile[str(self.startUserID)][str(itemList[item])] = round(self.itemAverage[str(itemList[item])])
#target
for j in range(self.targetCount):
target = np.random.randint(len(self.targetItems))
self.spamProfile[str(self.startUserID)][self.targetItems[target]] = self.targetScore
self.spamItem[str(self.startUserID)].append(self.targetItems[target])
self.startUserID += 1
```
### Random average relation
```
class RA_Attack(RandomRelationAttack,AverageAttack):
def __init__(self,conf):
super(RA_Attack, self).__init__(conf)
```
### Bandwagon attack
```
class BandWagonAttack(Attack):
def __init__(self,conf):
super(BandWagonAttack, self).__init__(conf)
self.hotItems = sorted(iter(self.itemProfile.items()), key=lambda d: len(d[1]), reverse=True)[
:int(self.selectedSize * len(self.itemProfile))]
def insertSpam(self,startID=0):
print('Modeling bandwagon attack...')
itemList = list(self.itemProfile.keys())
if startID == 0:
self.startUserID = len(self.userProfile)
else:
self.startUserID = startID
for i in range(int(len(self.userProfile)*self.attackSize)):
            # filler items
fillerItems = self.getFillerItems()
for item in fillerItems:
self.spamProfile[str(self.startUserID)][str(itemList[item])] = random.randint(self.minScore,self.maxScore)
            # selected (popular) items
selectedItems = self.getSelectedItems()
for item in selectedItems:
self.spamProfile[str(self.startUserID)][item] = self.targetScore
            # target items
for j in range(self.targetCount):
target = np.random.randint(len(self.targetItems))
self.spamProfile[str(self.startUserID)][self.targetItems[target]] = self.targetScore
self.spamItem[str(self.startUserID)].append(self.targetItems[target])
self.startUserID += 1
def getFillerItems(self):
mu = int(self.fillerSize*len(self.itemProfile))
sigma = int(0.1*mu)
markedItemsCount = int(round(random.gauss(mu, sigma)))
if markedItemsCount < 0:
markedItemsCount = 0
markedItems = np.random.randint(len(self.itemProfile), size=markedItemsCount)
return markedItems
def getSelectedItems(self):
mu = int(self.selectedSize * len(self.itemProfile))
sigma = int(0.1 * mu)
markedItemsCount = abs(int(round(random.gauss(mu, sigma))))
markedIndexes = np.random.randint(len(self.hotItems), size=markedItemsCount)
markedItems = [self.hotItems[index][0] for index in markedIndexes]
return markedItems
```
### Random bandwagon relation
```
class RB_Attack(RandomRelationAttack,BandWagonAttack):
def __init__(self,conf):
super(RB_Attack, self).__init__(conf)
```
### Hybrid attack
```
class HybridAttack(Attack):
def __init__(self,conf):
super(HybridAttack, self).__init__(conf)
self.aveAttack = AverageAttack(conf)
self.bandAttack = BandWagonAttack(conf)
self.randAttack = RandomAttack(conf)
def insertSpam(self,startID=0):
self.aveAttack.insertSpam()
self.bandAttack.insertSpam(self.aveAttack.startUserID+1)
self.randAttack.insertSpam(self.bandAttack.startUserID+1)
self.spamProfile = {}
self.spamProfile.update(self.aveAttack.spamProfile)
self.spamProfile.update(self.bandAttack.spamProfile)
self.spamProfile.update(self.randAttack.spamProfile)
def generateProfiles(self,filename):
ratings = []
path = self.outputDir + filename
with open(path, 'w') as f:
for user in self.userProfile:
for item in self.userProfile[user]:
ratings.append(user + ' ' + item + ' ' + str(self.userProfile[user][item]) + '\n')
for user in self.spamProfile:
for item in self.spamProfile[user]:
ratings.append(user + ' ' + item + ' ' + str(self.spamProfile[user][item]) + '\n')
f.writelines(ratings)
        print('User profiles have been output to ' + abspath(self.config['outputDir']) + '.')
def generateLabels(self,filename):
labels = []
path = self.outputDir + filename
with open(path,'w') as f:
for user in self.spamProfile:
labels.append(user+' 1\n')
for user in self.userProfile:
labels.append(user+' 0\n')
f.writelines(labels)
        print('User labels have been output to '+abspath(self.config['outputDir'])+'.')
```
### Generate data
```
%%writefile config.conf
ratings=dataset/filmtrust/ratings.txt
ratings.setup=-columns 0 1 2
social=dataset/filmtrust/trust.txt
social.setup=-columns 0 1 2
attackSize=0.1
fillerSize=0.05
selectedSize=0.005
targetCount=20
targetScore=4.0
threshold=3.0
maxScore=4.0
minScore=1.0
minCount=5
maxCount=50
linkSize=0.001
outputDir=output/
attack = RR_Attack('config.conf')
attack.insertSpam()
attack.farmLink()
attack.generateLabels('labels.txt')
attack.generateProfiles('profiles.txt')
attack.generateSocialConnections('relations.txt')
```
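The generated `profiles.txt` and `labels.txt` can then be fed back into the detection side. The sketch below is only an outline of that loop: it assumes a detector configuration whose `ratings` and `label` entries point at the generated files (the earlier `DegreeSAD.conf` would need those two paths changed), and that the `Config`, `FileIO`, `DataSplit` helpers above and the `DegreeSAD` detector defined under Methods below are in scope.
```
# Sketch only: wire the generated attack data into a detector.
conf = Config('DegreeSAD.conf')                 # assumed to point at output/profiles.txt and output/labels.txt
labels = FileIO.loadLabels('output/labels.txt')
data = FileIO.loadDataSet(conf, 'output/profiles.txt')
for i, (train, test) in enumerate(DataSplit.crossValidation(data, k=5)):
    detector = DegreeSAD(conf, train, test, labels, fold='[' + str(i + 1) + ']')
    detector.execute()                          # prints and saves a classification report per fold
```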
## Data access objects
```
class RatingDAO(object):
'data access control'
def __init__(self,config, trainingData, testData):
self.config = config
self.ratingConfig = LineConfig(config['ratings.setup'])
self.user = {} #used to store the order of users in the training set
self.item = {} #used to store the order of items in the training set
self.id2user = {}
self.id2item = {}
self.all_Item = {}
self.all_User = {}
        self.userMeans = {}  # used to store the mean values of users' ratings
        self.itemMeans = {}  # used to store the mean values of items' ratings
self.globalMean = 0
self.timestamp = {}
# self.trainingMatrix = None
# self.validationMatrix = None
self.testSet_u = testData.copy() # used to store the test set by hierarchy user:[item,rating]
self.testSet_i = defaultdict(dict) # used to store the test set by hierarchy item:[user,rating]
self.trainingSet_u = trainingData.copy()
self.trainingSet_i = defaultdict(dict)
#self.rScale = []
self.trainingData = trainingData
self.testData = testData
self.__generateSet()
self.__computeItemMean()
self.__computeUserMean()
self.__globalAverage()
def __generateSet(self):
scale = set()
# find the maximum rating and minimum value
# for i, entry in enumerate(self.trainingData):
# userName, itemName, rating = entry
# scale.add(float(rating))
# self.rScale = list(scale)
# self.rScale.sort()
for i,user in enumerate(self.trainingData):
for item in self.trainingData[user]:
# makes the rating within the range [0, 1].
#rating = normalize(float(rating), self.rScale[-1], self.rScale[0])
#self.trainingSet_u[userName][itemName] = float(rating)
self.trainingSet_i[item][user] = self.trainingData[user][item]
# order the user
if user not in self.user:
self.user[user] = len(self.user)
self.id2user[self.user[user]] = user
# order the item
if item not in self.item:
self.item[item] = len(self.item)
self.id2item[self.item[item]] = item
self.trainingSet_i[item][user] = self.trainingData[user][item]
# userList.append
# triple.append([self.user[userName], self.item[itemName], rating])
# self.trainingMatrix = new_sparseMatrix.SparseMatrix(triple)
self.all_User.update(self.user)
self.all_Item.update(self.item)
for i, user in enumerate(self.testData):
# order the user
if user not in self.user:
self.all_User[user] = len(self.all_User)
for item in self.testData[user]:
# order the item
if item not in self.item:
self.all_Item[item] = len(self.all_Item)
#self.testSet_u[userName][itemName] = float(rating)
self.testSet_i[item][user] = self.testData[user][item]
def __globalAverage(self):
total = sum(self.userMeans.values())
if total==0:
self.globalMean = 0
else:
self.globalMean = total/len(self.userMeans)
def __computeUserMean(self):
# for u in self.user:
# n = self.row(u) > 0
# mean = 0
#
# if not self.containsUser(u): # no data about current user in training set
# pass
# else:
# sum = float(self.row(u)[0].sum())
# try:
# mean = sum/ n[0].sum()
# except ZeroDivisionError:
# mean = 0
# self.userMeans[u] = mean
for u in self.trainingSet_u:
self.userMeans[u] = sum(self.trainingSet_u[u].values())/(len(list(self.trainingSet_u[u].values()))+0.0)
for u in self.testSet_u:
self.userMeans[u] = sum(self.testSet_u[u].values())/(len(list(self.testSet_u[u].values()))+0.0)
def __computeItemMean(self):
# for c in self.item:
# n = self.col(c) > 0
# mean = 0
# if not self.containsItem(c): # no data about current user in training set
# pass
# else:
# sum = float(self.col(c)[0].sum())
# try:
# mean = sum / n[0].sum()
# except ZeroDivisionError:
# mean = 0
# self.itemMeans[c] = mean
for item in self.trainingSet_i:
self.itemMeans[item] = sum(self.trainingSet_i[item].values())/(len(list(self.trainingSet_i[item].values())) + 0.0)
for item in self.testSet_i:
self.itemMeans[item] = sum(self.testSet_i[item].values())/(len(list(self.testSet_i[item].values())) + 0.0)
def getUserId(self,u):
if u in self.user:
return self.user[u]
else:
return -1
def getItemId(self,i):
if i in self.item:
return self.item[i]
else:
return -1
def trainingSize(self):
recordCount = 0
for user in self.trainingData:
recordCount+=len(self.trainingData[user])
return (len(self.trainingSet_u),len(self.trainingSet_i),recordCount)
def testSize(self):
recordCount = 0
for user in self.testData:
recordCount += len(self.testData[user])
return (len(self.testSet_u),len(self.testSet_i),recordCount)
def contains(self,u,i):
'whether user u rated item i'
if u in self.trainingSet_u and i in self.trainingSet_u[u]:
return True
return False
def containsUser(self,u):
'whether user is in training set'
return u in self.trainingSet_u
def containsItem(self,i):
'whether item is in training set'
return i in self.trainingSet_i
def allUserRated(self, u):
if u in self.user:
return list(self.trainingSet_u[u].keys()), list(self.trainingSet_u[u].values())
else:
return list(self.testSet_u[u].keys()), list(self.testSet_u[u].values())
# def userRated(self,u):
# if self.trainingMatrix.matrix_User.has_key(self.getUserId(u)):
# itemIndex = self.trainingMatrix.matrix_User[self.user[u]].keys()
# rating = self.trainingMatrix.matrix_User[self.user[u]].values()
# return (itemIndex,rating)
# return ([],[])
#
# def itemRated(self,i):
# if self.trainingMatrix.matrix_Item.has_key(self.getItemId(i)):
# userIndex = self.trainingMatrix.matrix_Item[self.item[i]].keys()
# rating = self.trainingMatrix.matrix_Item[self.item[i]].values()
# return (userIndex,rating)
# return ([],[])
# def row(self,u):
# return self.trainingMatrix.row(self.getUserId(u))
#
# def col(self,c):
# return self.trainingMatrix.col(self.getItemId(c))
#
# def sRow(self,u):
# return self.trainingMatrix.sRow(self.getUserId(u))
#
# def sCol(self,c):
# return self.trainingMatrix.sCol(self.getItemId(c))
#
# def rating(self,u,c):
# return self.trainingMatrix.elem(self.getUserId(u),self.getItemId(c))
#
# def ratingScale(self):
# return (self.rScale[0],self.rScale[1])
# def elemCount(self):
# return self.trainingMatrix.elemCount()
class SocialDAO(object):
def __init__(self,conf,relation=list()):
self.config = conf
self.user = {} #used to store the order of users
self.relation = relation
self.followees = {}
self.followers = {}
self.trustMatrix = self.__generateSet()
def __generateSet(self):
#triple = []
for line in self.relation:
userId1,userId2,weight = line
#add relations to dict
if userId1 not in self.followees:
self.followees[userId1] = {}
self.followees[userId1][userId2] = weight
if userId2 not in self.followers:
self.followers[userId2] = {}
self.followers[userId2][userId1] = weight
# order the user
if userId1 not in self.user:
self.user[userId1] = len(self.user)
if userId2 not in self.user:
self.user[userId2] = len(self.user)
#triple.append([self.user[userId1], self.user[userId2], weight])
#return new_sparseMatrix.SparseMatrix(triple)
# def row(self,u):
# #return user u's followees
# return self.trustMatrix.row(self.user[u])
#
# def col(self,u):
# #return user u's followers
# return self.trustMatrix.col(self.user[u])
#
# def elem(self,u1,u2):
# return self.trustMatrix.elem(u1,u2)
def weight(self,u1,u2):
if u1 in self.followees and u2 in self.followees[u1]:
return self.followees[u1][u2]
else:
return 0
# def trustSize(self):
# return self.trustMatrix.size
def getFollowers(self,u):
if u in self.followers:
return self.followers[u]
else:
return {}
def getFollowees(self,u):
if u in self.followees:
return self.followees[u]
else:
return {}
def hasFollowee(self,u1,u2):
if u1 in self.followees:
if u2 in self.followees[u1]:
return True
else:
return False
return False
def hasFollower(self,u1,u2):
if u1 in self.followers:
if u2 in self.followers[u1]:
return True
else:
return False
return False
```
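`RatingDAO` is essentially a set of nested dictionaries keyed by user and item plus cached user, item and global means. A tiny sketch with an invented two-user training set shows the derived statistics the detectors rely on; the config dict passed in only needs a `ratings.setup` entry.
```
# Toy data, only to illustrate what RatingDAO precomputes.
toyConf = {'ratings.setup': '-columns 0 1 2'}
training = {'u1': {'i1': 4.0, 'i2': 2.0}, 'u2': {'i1': 5.0}}
test = {'u3': {'i2': 3.0}}
dao = RatingDAO(toyConf, training, test)
print(dao.trainingSize())   # (2, 2, 3): training users, items, records
print(dao.userMeans['u1'])  # 3.0, the mean of u1's ratings
print(dao.itemMeans['i1'])  # 4.5, the mean rating of i1 in the training set
print(dao.globalMean)       # average of all cached user means
```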
## Methods
### BayesDetector
```
#BayesDetector: Collaborative Shilling Detection Bridging Factorization and User Embedding
class BayesDetector(SDetection):
def __init__(self, conf, trainingSet=None, testSet=None, labels=None, fold='[1]'):
super(BayesDetector, self).__init__(conf, trainingSet, testSet, labels, fold)
def readConfiguration(self):
super(BayesDetector, self).readConfiguration()
extraSettings = LineConfig(self.config['BayesDetector'])
self.k = int(extraSettings['-k'])
self.negCount = int(extraSettings['-negCount']) # the number of negative samples
if self.negCount < 1:
self.negCount = 1
self.regR = float(extraSettings['-gamma'])
self.filter = int(extraSettings['-filter'])
self.delta = float(extraSettings['-delta'])
learningRate = LineConfig(self.config['learnRate'])
self.lRate = float(learningRate['-init'])
self.maxLRate = float(learningRate['-max'])
self.maxIter = int(self.config['num.max.iter'])
regular = LineConfig(self.config['reg.lambda'])
self.regU, self.regI = float(regular['-u']), float(regular['-i'])
# self.delta = float(self.config['delta'])
def printAlgorConfig(self):
super(BayesDetector, self).printAlgorConfig()
print('k: %d' % self.negCount)
print('regR: %.5f' % self.regR)
print('filter: %d' % self.filter)
print('=' * 80)
def initModel(self):
super(BayesDetector, self).initModel()
# self.c = np.random.rand(len(self.dao.all_User) + 1) / 20 # bias value of context
self.G = np.random.rand(len(self.dao.all_User)+1, self.k) / 100 # context embedding
self.P = np.random.rand(len(self.dao.all_User)+1, self.k) / 100 # latent user matrix
self.Q = np.random.rand(len(self.dao.all_Item)+1, self.k) / 100 # latent item matrix
# constructing SPPMI matrix
self.SPPMI = defaultdict(dict)
D = len(self.dao.user)
print('Constructing SPPMI matrix...')
        # for larger datasets with many items, this process will be time-consuming
occurrence = defaultdict(dict)
for user1 in self.dao.all_User:
iList1, rList1 = self.dao.allUserRated(user1)
if len(iList1) < self.filter:
continue
for user2 in self.dao.all_User:
if user1 == user2:
continue
if user2 not in occurrence[user1]:
iList2, rList2 = self.dao.allUserRated(user2)
if len(iList2) < self.filter:
continue
count = len(set(iList1).intersection(set(iList2)))
if count > self.filter:
occurrence[user1][user2] = count
occurrence[user2][user1] = count
maxVal = 0
frequency = {}
for user1 in occurrence:
frequency[user1] = sum(occurrence[user1].values()) * 1.0
D = sum(frequency.values()) * 1.0
# maxx = -1
for user1 in occurrence:
for user2 in occurrence[user1]:
try:
val = max([log(occurrence[user1][user2] * D / (frequency[user1] * frequency[user2]), 2) - log(
self.negCount, 2), 0])
except ValueError:
print(self.SPPMI[user1][user2])
print(self.SPPMI[user1][user2] * D / (frequency[user1] * frequency[user2]))
if val > 0:
if maxVal < val:
maxVal = val
self.SPPMI[user1][user2] = val
self.SPPMI[user2][user1] = self.SPPMI[user1][user2]
# normalize
for user1 in self.SPPMI:
for user2 in self.SPPMI[user1]:
self.SPPMI[user1][user2] = self.SPPMI[user1][user2] / maxVal
def buildModel(self):
self.dao.ratings = dict(self.dao.trainingSet_u, **self.dao.testSet_u)
        # suspicious set
print('Preparing sets...')
self.sSet = defaultdict(dict)
#normal set
self.nSet = defaultdict(dict)
# self.NegativeSet = defaultdict(list)
for user in self.dao.user:
for item in self.dao.ratings[user]:
# if self.dao.ratings[user][item] >= 5 and self.labels[user]=='1':
if self.labels[user] =='1':
self.sSet[item][user] = 1
# if self.dao.ratings[user][item] >= 5 and self.labels[user] == '0':
if self.labels[user] == '0':
self.nSet[item][user] = 1
# Jointly decompose R(ratings) and SPPMI with shared user latent factors P
iteration = 0
while iteration < self.maxIter:
self.loss = 0
for item in self.sSet:
i = self.dao.all_Item[item]
if item not in self.nSet:
continue
normalUserList = list(self.nSet[item].keys())
for user in self.sSet[item]:
su = self.dao.all_User[user]
# if len(self.NegativeSet[user]) > 0:
# item_j = choice(self.NegativeSet[user])
# else:
normalUser = choice(normalUserList)
nu = self.dao.all_User[normalUser]
s = sigmoid(self.P[su].dot(self.Q[i]) - self.P[nu].dot(self.Q[i]))
self.Q[i] += (self.lRate * (1 - s) * (self.P[su] - self.P[nu]))
self.P[su] += (self.lRate * (1 - s) * self.Q[i])
self.P[nu] -= (self.lRate * (1 - s) * self.Q[i])
self.Q[i] -= self.lRate * self.regI * self.Q[i]
self.P[su] -= self.lRate * self.regU * self.P[su]
self.P[nu] -= self.lRate * self.regU * self.P[nu]
self.loss += (-log(s))
#
# for item in self.sSet:
# if not self.nSet.has_key(item):
# continue
# for user1 in self.sSet[item]:
# for user2 in self.sSet[item]:
# su1 = self.dao.all_User[user1]
# su2 = self.dao.all_User[user2]
# self.P[su1] += (self.lRate*(self.P[su1]-self.P[su2]))*self.delta
# self.P[su2] -= (self.lRate*(self.P[su1]-self.P[su2]))*self.delta
#
# self.loss += ((self.P[su1]-self.P[su2]).dot(self.P[su1]-self.P[su2]))*self.delta
for user in self.dao.ratings:
for item in self.dao.ratings[user]:
rating = self.dao.ratings[user][item]
if rating < 5:
continue
error = rating - self.predictRating(user,item)
u = self.dao.all_User[user]
i = self.dao.all_Item[item]
p = self.P[u]
q = self.Q[i]
# self.loss += (error ** 2)*self.b
# update latent vectors
self.P[u] += (self.lRate * (error * q - self.regU * p))
self.Q[i] += (self.lRate * (error * p - self.regI * q))
for user in self.SPPMI:
u = self.dao.all_User[user]
p = self.P[u]
for context in self.SPPMI[user]:
v = self.dao.all_User[context]
m = self.SPPMI[user][context]
g = self.G[v]
diff = (m - p.dot(g))
self.loss += (diff ** 2)
# update latent vectors
self.P[u] += (self.lRate * diff * g)
self.G[v] += (self.lRate * diff * p)
self.loss += self.regU * (self.P * self.P).sum() + self.regI * (self.Q * self.Q).sum() + self.regR * (self.G * self.G).sum()
iteration += 1
print('iteration:',iteration)
# preparing examples
self.training = []
self.trainingLabels = []
self.test = []
self.testLabels = []
for user in self.dao.trainingSet_u:
self.training.append(self.P[self.dao.all_User[user]])
self.trainingLabels.append(self.labels[user])
for user in self.dao.testSet_u:
self.test.append(self.P[self.dao.all_User[user]])
self.testLabels.append(self.labels[user])
#
# tsne = TSNE(n_components=2)
# self.Y = tsne.fit_transform(self.P)
#
# self.normalUsers = []
# self.spammers = []
# for user in self.labels:
# if self.labels[user] == '0':
# self.normalUsers.append(user)
# else:
# self.spammers.append(user)
#
#
# print len(self.spammers)
# self.normalfeature = np.zeros((len(self.normalUsers), 2))
# self.spamfeature = np.zeros((len(self.spammers), 2))
# normal_index = 0
# for normaluser in self.normalUsers:
# if normaluser in self.dao.all_User:
# self.normalfeature[normal_index] = self.Y[self.dao.all_User[normaluser]]
# normal_index += 1
#
# spam_index = 0
# for spamuser in self.spammers:
# if spamuser in self.dao.all_User:
# self.spamfeature[spam_index] = self.Y[self.dao.all_User[spamuser]]
# spam_index += 1
# self.randomNormal = np.zeros((500,2))
# self.randomSpam = np.zeros((500,2))
# # for i in range(500):
# # self.randomNormal[i] = self.normalfeature[random.randint(0,len(self.normalfeature)-1)]
# # self.randomSpam[i] = self.spamfeature[random.randint(0,len(self.spamfeature)-1)]
# plt.scatter(self.normalfeature[:, 0], self.normalfeature[:, 1], c='red',s=8,marker='o',label='NormalUser')
# plt.scatter(self.spamfeature[:, 0], self.spamfeature[:, 1], c='blue',s=8,marker='o',label='Spammer')
# plt.legend(loc='lower left')
# plt.xticks([])
# plt.yticks([])
# plt.savefig('9.png',dpi=500)
def predictRating(self,user,item):
u = self.dao.all_User[user]
i = self.dao.all_Item[item]
return self.P[u].dot(self.Q[i])
def predict(self):
classifier = RandomForestClassifier(n_estimators=12)
# classifier = DecisionTreeClassifier(criterion='entropy')
classifier.fit(self.training, self.trainingLabels)
pred_labels = classifier.predict(self.test)
        print('Random Forest:')
return pred_labels
```
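The SPPMI entry for a pair of users is `max(log2(co-count * D / (freq(u1) * freq(u2))) - log2(negCount), 0)`, where `D` is the total co-occurrence mass and `negCount` comes from the configuration (256 in the example conf). A small numeric sketch with invented counts:
```
from math import log

# Invented co-occurrence statistics for a single user pair.
negCount = 256
cooccur = 20        # items co-rated by user1 and user2
freq_u1 = 30.0      # total co-occurrence mass of user1
freq_u2 = 25.0      # total co-occurrence mass of user2
D = 50000.0         # sum of all users' co-occurrence masses
val = max(log(cooccur * D / (freq_u1 * freq_u2), 2) - log(negCount, 2), 0)
print(round(val, 2))  # 2.38; positive entries are kept and later divided by the maximum
```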
### CoDetector
```
#CoDetector: Collaborative Shilling Detection Bridging Factorization and User Embedding
class CoDetector(SDetection):
def __init__(self, conf, trainingSet=None, testSet=None, labels=None, fold='[1]'):
super(CoDetector, self).__init__(conf, trainingSet, testSet, labels, fold)
def readConfiguration(self):
super(CoDetector, self).readConfiguration()
extraSettings = LineConfig(self.config['CoDetector'])
self.k = int(extraSettings['-k'])
self.negCount = int(extraSettings['-negCount']) # the number of negative samples
if self.negCount < 1:
self.negCount = 1
self.regR = float(extraSettings['-gamma'])
self.filter = int(extraSettings['-filter'])
learningRate = LineConfig(self.config['learnRate'])
self.lRate = float(learningRate['-init'])
self.maxLRate = float(learningRate['-max'])
self.maxIter = int(self.config['num.max.iter'])
regular = LineConfig(self.config['reg.lambda'])
self.regU, self.regI = float(regular['-u']), float(regular['-i'])
def printAlgorConfig(self):
super(CoDetector, self).printAlgorConfig()
print('k: %d' % self.negCount)
print('regR: %.5f' % self.regR)
print('filter: %d' % self.filter)
print('=' * 80)
def initModel(self):
super(CoDetector, self).initModel()
self.w = np.random.rand(len(self.dao.all_User)+1) / 20 # bias value of user
self.c = np.random.rand(len(self.dao.all_User)+1)/ 20 # bias value of context
self.G = np.random.rand(len(self.dao.all_User)+1, self.k) / 20 # context embedding
self.P = np.random.rand(len(self.dao.all_User)+1, self.k) / 20 # latent user matrix
self.Q = np.random.rand(len(self.dao.all_Item)+1, self.k) / 20 # latent item matrix
# constructing SPPMI matrix
self.SPPMI = defaultdict(dict)
D = len(self.dao.user)
print('Constructing SPPMI matrix...')
        # for larger datasets with many items, this process will be time-consuming
occurrence = defaultdict(dict)
for user1 in self.dao.all_User:
iList1, rList1 = self.dao.allUserRated(user1)
if len(iList1) < self.filter:
continue
for user2 in self.dao.all_User:
if user1 == user2:
continue
if user2 not in occurrence[user1]:
iList2, rList2 = self.dao.allUserRated(user2)
if len(iList2) < self.filter:
continue
count = len(set(iList1).intersection(set(iList2)))
if count > self.filter:
occurrence[user1][user2] = count
occurrence[user2][user1] = count
maxVal = 0
frequency = {}
for user1 in occurrence:
frequency[user1] = sum(occurrence[user1].values()) * 1.0
D = sum(frequency.values()) * 1.0
# maxx = -1
for user1 in occurrence:
for user2 in occurrence[user1]:
try:
val = max([log(occurrence[user1][user2] * D / (frequency[user1] * frequency[user2]), 2) - log(
self.negCount, 2), 0])
except ValueError:
print(self.SPPMI[user1][user2])
print(self.SPPMI[user1][user2] * D / (frequency[user1] * frequency[user2]))
if val > 0:
if maxVal < val:
maxVal = val
self.SPPMI[user1][user2] = val
self.SPPMI[user2][user1] = self.SPPMI[user1][user2]
# normalize
for user1 in self.SPPMI:
for user2 in self.SPPMI[user1]:
self.SPPMI[user1][user2] = self.SPPMI[user1][user2] / maxVal
def buildModel(self):
# Jointly decompose R(ratings) and SPPMI with shared user latent factors P
iteration = 0
while iteration < self.maxIter:
self.loss = 0
self.dao.ratings = dict(self.dao.trainingSet_u, **self.dao.testSet_u)
for user in self.dao.ratings:
for item in self.dao.ratings[user]:
rating = self.dao.ratings[user][item]
error = rating - self.predictRating(user,item)
u = self.dao.all_User[user]
i = self.dao.all_Item[item]
p = self.P[u]
q = self.Q[i]
self.loss += error ** 2
# update latent vectors
self.P[u] += self.lRate * (error * q - self.regU * p)
self.Q[i] += self.lRate * (error * p - self.regI * q)
for user in self.SPPMI:
u = self.dao.all_User[user]
p = self.P[u]
for context in self.SPPMI[user]:
v = self.dao.all_User[context]
m = self.SPPMI[user][context]
g = self.G[v]
diff = (m - p.dot(g) - self.w[u] - self.c[v])
self.loss += diff ** 2
# update latent vectors
self.P[u] += self.lRate * diff * g
self.G[v] += self.lRate * diff * p
self.w[u] += self.lRate * diff
self.c[v] += self.lRate * diff
self.loss += self.regU * (self.P * self.P).sum() + self.regI * (self.Q * self.Q).sum() + self.regR * (self.G * self.G).sum()
iteration += 1
print('iteration:',iteration)
# preparing examples
self.training = []
self.trainingLabels = []
self.test = []
self.testLabels = []
for user in self.dao.trainingSet_u:
self.training.append(self.P[self.dao.all_User[user]])
self.trainingLabels.append(self.labels[user])
for user in self.dao.testSet_u:
self.test.append(self.P[self.dao.all_User[user]])
self.testLabels.append(self.labels[user])
def predictRating(self,user,item):
u = self.dao.all_User[user]
i = self.dao.all_Item[item]
return self.P[u].dot(self.Q[i])
def predict(self):
classifier = DecisionTreeClassifier(criterion='entropy')
classifier.fit(self.training, self.trainingLabels)
pred_labels = classifier.predict(self.test)
print('Decision Tree:')
return pred_labels
```
### DegreeSAD
```
class DegreeSAD(SDetection):
def __init__(self, conf, trainingSet=None, testSet=None, labels=None, fold='[1]'):
super(DegreeSAD, self).__init__(conf, trainingSet, testSet, labels, fold)
def buildModel(self):
self.MUD = {}
self.RUD = {}
self.QUD = {}
# computing MUD,RUD,QUD for training set
sList = sorted(iter(self.dao.trainingSet_i.items()), key=lambda d: len(d[1]), reverse=True)
maxLength = len(sList[0][1])
for user in self.dao.trainingSet_u:
self.MUD[user] = 0
for item in self.dao.trainingSet_u[user]:
self.MUD[user] += len(self.dao.trainingSet_i[item]) #/ float(maxLength)
            self.MUD[user] /= float(len(self.dao.trainingSet_u[user]))
lengthList = [len(self.dao.trainingSet_i[item]) for item in self.dao.trainingSet_u[user]]
lengthList.sort(reverse=True)
self.RUD[user] = lengthList[0] - lengthList[-1]
lengthList = [len(self.dao.trainingSet_i[item]) for item in self.dao.trainingSet_u[user]]
lengthList.sort()
self.QUD[user] = lengthList[int((len(lengthList) - 1) / 4.0)]
# computing MUD,RUD,QUD for test set
for user in self.dao.testSet_u:
self.MUD[user] = 0
for item in self.dao.testSet_u[user]:
                self.MUD[user] += len(self.dao.trainingSet_i[item]) #/ float(maxLength)
            self.MUD[user] /= float(len(self.dao.testSet_u[user]))
for user in self.dao.testSet_u:
lengthList = [len(self.dao.trainingSet_i[item]) for item in self.dao.testSet_u[user]]
lengthList.sort(reverse=True)
self.RUD[user] = lengthList[0] - lengthList[-1]
for user in self.dao.testSet_u:
lengthList = [len(self.dao.trainingSet_i[item]) for item in self.dao.testSet_u[user]]
lengthList.sort()
self.QUD[user] = lengthList[int((len(lengthList) - 1) / 4.0)]
# preparing examples
for user in self.dao.trainingSet_u:
self.training.append([self.MUD[user], self.RUD[user], self.QUD[user]])
self.trainingLabels.append(self.labels[user])
for user in self.dao.testSet_u:
self.test.append([self.MUD[user], self.RUD[user], self.QUD[user]])
self.testLabels.append(self.labels[user])
def predict(self):
# classifier = LogisticRegression()
# classifier.fit(self.training, self.trainingLabels)
# pred_labels = classifier.predict(self.test)
# print 'Logistic:'
# print classification_report(self.testLabels, pred_labels)
#
# classifier = SVC()
# classifier.fit(self.training, self.trainingLabels)
# pred_labels = classifier.predict(self.test)
# print 'SVM:'
# print classification_report(self.testLabels, pred_labels)
classifier = DecisionTreeClassifier(criterion='entropy')
classifier.fit(self.training, self.trainingLabels)
pred_labels = classifier.predict(self.test)
print('Decision Tree:')
return pred_labels
```
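DegreeSAD builds three popularity-based features per user: MUD (the mean degree of the items the user rated), RUD (the range between the most and least popular rated items), and QUD (the lower-quartile degree). A hand-worked sketch for one invented user:
```
# Invented item popularities (degree = number of raters in the training set).
item_degree = {'i1': 50, 'i2': 8, 'i3': 120, 'i4': 15}
rated = ['i1', 'i2', 'i3', 'i4']              # items rated by this user

degrees = [item_degree[i] for i in rated]
MUD = sum(degrees) / float(len(degrees))               # 48.25: mean popularity
RUD = max(degrees) - min(degrees)                      # 112: popularity range
QUD = sorted(degrees)[int((len(degrees) - 1) / 4.0)]   # 8: lower-quartile degree
print(MUD, RUD, QUD)
```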
### FAP
```
class FAP(SDetection):
def __init__(self, conf, trainingSet=None, testSet=None, labels=None, fold='[1]'):
super(FAP, self).__init__(conf, trainingSet, testSet, labels, fold)
def readConfiguration(self):
super(FAP, self).readConfiguration()
        # s is the number of seed users regarded as spammers during training
        self.s = int(self.config['seedUser'])
# preserve the real spammer ID
self.spammer = []
for i in self.dao.user:
if self.labels[i] == '1':
self.spammer.append(self.dao.user[i])
sThreshold = int(0.5 * len(self.spammer))
if self.s > sThreshold :
self.s = sThreshold
            print('*** seedUser exceeds half of the spammers, so it is set to', sThreshold, '***')
        # predict the top-k users as spammers
        self.k = int(self.config['topKSpam'])
        # 0.5 is the assumed ratio of spammers in the dataset; it can be changed for different datasets
kThreshold = int(0.5 * (len(self.dao.user) - self.s))
if self.k > kThreshold:
self.k = kThreshold
            print('*** the number of top-K users exceeds the threshold, so it is set to', kThreshold, '***')
    # build the transition probability matrices self.TPUI and self.TPIU
def __computeTProbability(self):
# m--user count; n--item count
m, n, tmp = self.dao.trainingSize()
self.TPUI = np.zeros((m, n))
self.TPIU = np.zeros((n, m))
self.userUserIdDic = {}
self.itemItemIdDic = {}
tmpUser = list(self.dao.user.values())
tmpUserId = list(self.dao.user.keys())
tmpItem = list(self.dao.item.values())
tmpItemId = list(self.dao.item.keys())
for users in range(0, m):
self.userUserIdDic[tmpUser[users]] = tmpUserId[users]
for items in range(0, n):
self.itemItemIdDic[tmpItem[items]] = tmpItemId[items]
for i in range(0, m):
for j in range(0, n):
user = self.userUserIdDic[i]
item = self.itemItemIdDic[j]
                # if the graph has this edge, set a value; otherwise leave it 0
if (user not in self.bipartiteGraphUI) or (item not in self.bipartiteGraphUI[user]):
continue
else:
w = float(self.bipartiteGraphUI[user][item])
                    # to avoid positive feedback and reliability problems, we should polish the weight w
otherItemW = 0
otherUserW = 0
for otherItem in self.bipartiteGraphUI[user]:
otherItemW += float(self.bipartiteGraphUI[user][otherItem])
for otherUser in self.dao.trainingSet_i[item]:
otherUserW += float(self.bipartiteGraphUI[otherUser][item])
# wPrime = w*1.0/(otherUserW * otherItemW)
wPrime = w
self.TPUI[i][j] = wPrime / otherItemW
self.TPIU[j][i] = wPrime / otherUserW
if i % 100 == 0:
print('progress: %d/%d' %(i,m))
def initModel(self):
# construction of the bipartite graph
print("constructing bipartite graph...")
self.bipartiteGraphUI = {}
for user in self.dao.trainingSet_u:
tmpUserItemDic = {} # user-item-point
for item in self.dao.trainingSet_u[user]:
# tmpItemUserDic = {}#item-user-point
recordValue = float(self.dao.trainingSet_u[user][item])
w = 1 + abs((recordValue - self.dao.userMeans[user]) / self.dao.userMeans[user]) + abs(
(recordValue - self.dao.itemMeans[item]) / self.dao.itemMeans[item]) + abs(
(recordValue - self.dao.globalMean) / self.dao.globalMean)
# tmpItemUserDic[user] = w
tmpUserItemDic[item] = w
# self.bipartiteGraphIU[item] = tmpItemUserDic
self.bipartiteGraphUI[user] = tmpUserItemDic
# we do the polish in computing the transition probability
print("computing transition probability...")
self.__computeTProbability()
def isConvergence(self, PUser, PUserOld):
if len(PUserOld) == 0:
return True
for i in range(0, len(PUser)):
if (PUser[i] - PUserOld[i]) > 0.01:
return True
return False
def buildModel(self):
# -------init--------
m, n, tmp = self.dao.trainingSize()
PUser = np.zeros(m)
PItem = np.zeros(n)
self.testLabels = [0 for i in range(m)]
self.predLabels = [0 for i in range(m)]
# preserve seedUser Index
self.seedUser = []
randDict = {}
for i in range(0, self.s):
randNum = random.randint(0, len(self.spammer) - 1)
while randNum in randDict:
randNum = random.randint(0, len(self.spammer) - 1)
randDict[randNum] = 0
self.seedUser.append(int(self.spammer[randNum]))
# print len(randDict), randDict
#initial user and item spam probability
for j in range(0, m):
if j in self.seedUser:
#print type(j),j
PUser[j] = 1
else:
PUser[j] = random.random()
for tmp in range(0, n):
PItem[tmp] = random.random()
# -------iterator-------
PUserOld = []
iterator = 0
while self.isConvergence(PUser, PUserOld):
#while iterator < 100:
for j in self.seedUser:
PUser[j] = 1
PUserOld = PUser
PItem = np.dot(self.TPIU, PUser)
PUser = np.dot(self.TPUI, PItem)
iterator += 1
print(self.foldInfo,'iteration', iterator)
PUserDict = {}
userId = 0
for i in PUser:
PUserDict[userId] = i
userId += 1
for j in self.seedUser:
del PUserDict[j]
self.PSort = sorted(iter(PUserDict.items()), key=lambda d: d[1], reverse=True)
def predict(self):
# predLabels
# top-k user as spammer
spamList = []
sIndex = 0
while sIndex < self.k:
spam = self.PSort[sIndex][0]
spamList.append(spam)
self.predLabels[spam] = 1
sIndex += 1
# trueLabels
for user in self.dao.trainingSet_u:
userInd = self.dao.user[user]
# print type(user), user, userInd
self.testLabels[userInd] = int(self.labels[user])
# delete seedUser labels
differ = 0
for user in self.seedUser:
user = int(user - differ)
# print type(user)
del self.predLabels[user]
del self.testLabels[user]
differ += 1
return self.predLabels
```
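FAP spreads spam probability across the user-item bipartite graph by alternating `PItem = TPIU · PUser` and `PUser = TPUI · PItem`, pinning the seed spammers to probability 1 each round. Below is a minimal sketch on an invented 3-user / 2-item graph; the transition matrices are made up and normalised in a simplified way, not via `__computeTProbability`.
```
import numpy as np

TPUI = np.array([[0.7, 0.3],   # user -> item transition probabilities (3 users x 2 items)
                 [0.2, 0.8],
                 [0.5, 0.5]])
TPIU = TPUI.T / TPUI.T.sum(axis=1, keepdims=True)  # item -> user, row-normalised
PUser = np.array([1.0, 0.2, 0.3])                  # user 0 is a seed spammer

for _ in range(3):
    PUser[0] = 1.0              # pin the seed spammer each iteration
    PItem = TPIU.dot(PUser)     # propagate user -> item
    PUser = TPUI.dot(PItem)     # propagate item -> user
print(np.round(PUser, 3))       # users with higher values are more suspicious
```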
### PCASelectUsers
```
class PCASelectUsers(SDetection):
def __init__(self, conf, trainingSet=None, testSet=None, labels=None, fold='[1]', k=None, n=None ):
super(PCASelectUsers, self).__init__(conf, trainingSet, testSet, labels, fold)
def readConfiguration(self):
super(PCASelectUsers, self).readConfiguration()
# K = top-K vals of cov
self.k = int(self.config['kVals'])
self.userNum = len(self.dao.trainingSet_u)
self.itemNum = len(self.dao.trainingSet_i)
if self.k >= min(self.userNum, self.itemNum):
self.k = 3
            print('*** kVals exceeds the number of users or items, so it is set to', self.k)
# n = attack size or the ratio of spammers to normal users
self.n = float(self.config['attackSize'])
def buildModel(self):
#array initialization
dataArray = np.zeros([self.userNum, self.itemNum], dtype=float)
self.testLabels = np.zeros(self.userNum)
self.predLabels = np.zeros(self.userNum)
#add data
print('construct matrix')
for user in self.dao.trainingSet_u:
for item in list(self.dao.trainingSet_u[user].keys()):
value = self.dao.trainingSet_u[user][item]
a = self.dao.user[user]
b = self.dao.item[item]
dataArray[a][b] = value
sMatrix = csr_matrix(dataArray)
# z-scores
sMatrix = preprocessing.scale(sMatrix, axis=0, with_mean=False)
sMT = np.transpose(sMatrix)
# cov
covSM = np.dot(sMT, sMatrix)
# eigen-value-decomposition
vals, vecs = scipy.sparse.linalg.eigs(covSM, k=self.k, which='LM')
newArray = np.dot(dataArray**2, np.real(vecs))
distanceDict = {}
userId = 0
for user in newArray:
distance = 0
for tmp in user:
distance += tmp
distanceDict[userId] = float(distance)
userId += 1
print('sort distance ')
self.disSort = sorted(iter(distanceDict.items()), key=lambda d: d[1], reverse=False)
def predict(self):
print('predict spammer')
spamList = []
i = 0
while i < self.n * len(self.disSort):
spam = self.disSort[i][0]
spamList.append(spam)
self.predLabels[spam] = 1
i += 1
# trueLabels
for user in self.dao.trainingSet_u:
userInd = self.dao.user[user]
self.testLabels[userInd] = int(self.labels[user])
return self.predLabels
```
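PCASelectUsers scales each item column, takes the top-k eigenvectors of the item-item covariance matrix, scores every user by the sum of the projections of their squared rating vector, and flags the users with the smallest scores. A condensed sketch of the same steps on random data (the shapes and the 10% attack size are invented):
```
import numpy as np

rng = np.random.default_rng(0)
R = rng.integers(0, 6, size=(20, 15)).astype(float)   # 20 users x 15 items
Z = R / (R.std(axis=0) + 1e-9)                         # scale item columns (no centring, as with_mean=False)
cov = Z.T.dot(Z)                                       # item-item covariance-like matrix (15 x 15)
vals, vecs = np.linalg.eigh(cov)                       # eigenvalues in ascending order
topk = vecs[:, -3:]                                    # k = 3 leading eigenvectors
distance = (R ** 2).dot(topk).sum(axis=1)              # per-user score, as in buildModel
suspects = np.argsort(distance)[:int(0.1 * len(R))]    # smallest scores are flagged as spammers
print(suspects)
```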
### SemiSAD
```
class SemiSAD(SDetection):
def __init__(self, conf, trainingSet=None, testSet=None, labels=None, fold='[1]'):
super(SemiSAD, self).__init__(conf, trainingSet, testSet, labels, fold)
def readConfiguration(self):
super(SemiSAD, self).readConfiguration()
        # k = the number of most-similar users used when computing DegSim
        self.k = int(self.config['topK'])
        # Lambda = the λ parameter
        self.Lambda = float(self.config['Lambda'])
def buildModel(self):
self.H = {}
self.DegSim = {}
self.LengVar = {}
self.RDMA = {}
self.FMTD = {}
print('Begin feature engineering...')
# computing H,DegSim,LengVar,RDMA,FMTD for LabledData set
trainingIndex = 0
testIndex = 0
trainingUserCount, trainingItemCount, trainingrecordCount = self.dao.trainingSize()
testUserCount, testItemCount, testrecordCount = self.dao.testSize()
for user in self.dao.trainingSet_u:
trainingIndex += 1
self.H[user] = 0
for i in range(10,50,5):
n = 0
for item in self.dao.trainingSet_u[user]:
if(self.dao.trainingSet_u[user][item]==(i/10.0)):
n+=1
if n==0:
self.H[user] += 0
else:
self.H[user] += (-(n/(trainingUserCount*1.0))*math.log(n/(trainingUserCount*1.0),2))
SimList = []
self.DegSim[user] = 0
for user1 in self.dao.trainingSet_u:
userA, userB, C, D, E, Count = 0,0,0,0,0,0
for item in list(set(self.dao.trainingSet_u[user]).intersection(set(self.dao.trainingSet_u[user1]))):
userA += self.dao.trainingSet_u[user][item]
userB += self.dao.trainingSet_u[user1][item]
Count += 1
if Count==0:
AverageA = 0
AverageB = 0
else:
AverageA = userA/Count
AverageB = userB/Count
for item in list(set(self.dao.trainingSet_u[user]).intersection(set(self.dao.trainingSet_u[user1]))):
C += (self.dao.trainingSet_u[user][item]-AverageA)*(self.dao.trainingSet_u[user1][item]-AverageB)
D += np.square(self.dao.trainingSet_u[user][item]-AverageA)
E += np.square(self.dao.trainingSet_u[user1][item]-AverageB)
if C==0:
SimList.append(0.0)
else:
SimList.append(C/(math.sqrt(D)*math.sqrt(E)))
SimList.sort(reverse=True)
for i in range(1,self.k+1):
self.DegSim[user] += SimList[i] / (self.k)
GlobalAverage = 0
F = 0
for user2 in self.dao.trainingSet_u:
GlobalAverage += len(self.dao.trainingSet_u[user2]) / (len(self.dao.trainingSet_u) + 0.0)
for user3 in self.dao.trainingSet_u:
F += pow(len(self.dao.trainingSet_u[user3])-GlobalAverage,2)
self.LengVar[user] = abs(len(self.dao.trainingSet_u[user])-GlobalAverage)/(F*1.0)
Divisor = 0
for item1 in self.dao.trainingSet_u[user]:
Divisor += abs(self.dao.trainingSet_u[user][item1]-self.dao.itemMeans[item1])/len(self.dao.trainingSet_i[item1])
self.RDMA[user] = Divisor/len(self.dao.trainingSet_u[user])
Minuend, index1, Subtrahend, index2 = 0, 0, 0, 0
for item3 in self.dao.trainingSet_u[user]:
if(self.dao.trainingSet_u[user][item3]==5.0 or self.dao.trainingSet_u[user][item3]==1.0) :
Minuend += sum(self.dao.trainingSet_i[item3].values())
index1 += len(self.dao.trainingSet_i[item3])
else:
Subtrahend += sum(self.dao.trainingSet_i[item3].values())
index2 += len(self.dao.trainingSet_i[item3])
if index1 == 0 and index2 == 0:
self.FMTD[user] = 0
elif index1 == 0:
self.FMTD[user] = abs(Subtrahend / index2)
elif index2 == 0:
self.FMTD[user] = abs(Minuend / index1)
else:
self.FMTD[user] = abs(Minuend / index1 - Subtrahend / index2)
if trainingIndex==(trainingUserCount/5):
print('trainingData Done 20%...')
elif trainingIndex==(trainingUserCount/5*2):
print('trainingData Done 40%...')
elif trainingIndex==(trainingUserCount/5*3):
print('trainingData Done 60%...')
elif trainingIndex==(trainingUserCount/5*4):
print('trainingData Done 80%...')
elif trainingIndex==(trainingUserCount):
print('trainingData Done 100%...')
        # computing H, DegSim, LengVar, RDMA, FMTD for the unlabeled (test) set
for user in self.dao.testSet_u:
testIndex += 1
self.H[user] = 0
for i in range(10,50,5):
n = 0
for item in self.dao.testSet_u[user]:
if(self.dao.testSet_u[user][item]==(i/10.0)):
n+=1
if n==0:
self.H[user] += 0
else:
self.H[user] += (-(n/(testUserCount*1.0))*math.log(n/(testUserCount*1.0),2))
SimList = []
self.DegSim[user] = 0
for user1 in self.dao.testSet_u:
userA, userB, C, D, E, Count = 0,0,0,0,0,0
for item in list(set(self.dao.testSet_u[user]).intersection(set(self.dao.testSet_u[user1]))):
userA += self.dao.testSet_u[user][item]
userB += self.dao.testSet_u[user1][item]
Count += 1
if Count==0:
AverageA = 0
AverageB = 0
else:
AverageA = userA/Count
AverageB = userB/Count
for item in list(set(self.dao.testSet_u[user]).intersection(set(self.dao.testSet_u[user1]))):
C += (self.dao.testSet_u[user][item]-AverageA)*(self.dao.testSet_u[user1][item]-AverageB)
D += np.square(self.dao.testSet_u[user][item]-AverageA)
E += np.square(self.dao.testSet_u[user1][item]-AverageB)
if C==0:
SimList.append(0.0)
else:
SimList.append(C/(math.sqrt(D)*math.sqrt(E)))
SimList.sort(reverse=True)
for i in range(1,self.k+1):
self.DegSim[user] += SimList[i] / self.k
GlobalAverage = 0
F = 0
for user2 in self.dao.testSet_u:
GlobalAverage += len(self.dao.testSet_u[user2]) / (len(self.dao.testSet_u) + 0.0)
for user3 in self.dao.testSet_u:
F += pow(len(self.dao.testSet_u[user3])-GlobalAverage,2)
self.LengVar[user] = abs(len(self.dao.testSet_u[user])-GlobalAverage)/(F*1.0)
Divisor = 0
for item1 in self.dao.testSet_u[user]:
Divisor += abs(self.dao.testSet_u[user][item1]-self.dao.itemMeans[item1])/len(self.dao.testSet_i[item1])
self.RDMA[user] = Divisor/len(self.dao.testSet_u[user])
Minuend, index1, Subtrahend, index2= 0,0,0,0
for item3 in self.dao.testSet_u[user]:
if(self.dao.testSet_u[user][item3]==5.0 or self.dao.testSet_u[user][item3]==1.0):
Minuend += sum(self.dao.testSet_i[item3].values())
index1 += len(self.dao.testSet_i[item3])
else:
Subtrahend += sum(self.dao.testSet_i[item3].values())
index2 += len(self.dao.testSet_i[item3])
if index1 == 0 and index2 == 0:
self.FMTD[user] = 0
elif index1 == 0:
self.FMTD[user] = abs(Subtrahend / index2)
elif index2 == 0:
self.FMTD[user] = abs(Minuend / index1)
else:
self.FMTD[user] = abs(Minuend / index1 - Subtrahend / index2)
if testIndex == testUserCount / 5:
print('testData Done 20%...')
elif testIndex == testUserCount / 5 * 2:
print('testData Done 40%...')
elif testIndex == testUserCount / 5 * 3:
print('testData Done 60%...')
elif testIndex == testUserCount / 5 * 4:
print('testData Done 80%...')
elif testIndex == testUserCount:
print('testData Done 100%...')
        # preparing examples: training set from the labeled data, test set from the unlabeled data
for user in self.dao.trainingSet_u:
self.training.append([self.H[user], self.DegSim[user], self.LengVar[user],self.RDMA[user],self.FMTD[user]])
self.trainingLabels.append(self.labels[user])
for user in self.dao.testSet_u:
self.test.append([self.H[user], self.DegSim[user], self.LengVar[user],self.RDMA[user],self.FMTD[user]])
self.testLabels.append(self.labels[user])
def predict(self):
ClassifierN = 0
classifier = GaussianNB()
X_train,X_test,y_train,y_test = train_test_split(self.training,self.trainingLabels,test_size=0.75,random_state=33)
classifier.fit(X_train, y_train)
        # predict unlabeled data
#pred_labelsForTrainingUn = classifier.predict(X_test)
print('Enhanced classifier...')
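        # Self-training loop: repeatedly score the remaining unlabeled samples, take the 5 the
        # classifier is most confident about, pseudo-label them (class '0' weighted by Lambda,
        # class '1' weighted by 1), fold them back in with partial_fit, and drop them from the pool.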
while 1:
if len(X_test)<=5: # min
break #min
proba_labelsForTrainingUn = classifier.predict_proba(X_test)
X_test_labels = np.hstack((X_test, proba_labelsForTrainingUn))
X_test_labels0_sort = sorted(X_test_labels,key=lambda x:x[5],reverse=True)
if X_test_labels0_sort[4][5]>X_test_labels0_sort[4][6]:
a = [x[:5] for x in X_test_labels0_sort]
b = a[0:5]
classifier.partial_fit(b, ['0','0','0','0','0'], classes=['0', '1'],sample_weight=np.ones(len(b), dtype=np.float) * self.Lambda)
X_test_labels = X_test_labels0_sort[5:]
X_test = a[5:]
if len(X_test)<6: # min
break #min
X_test_labels0_sort = sorted(X_test_labels, key=lambda x: x[5], reverse=True)
if X_test_labels0_sort[4][5]<=X_test_labels0_sort[4][6]: #min
a = [x[:5] for x in X_test_labels0_sort]
b = a[0:5]
classifier.partial_fit(b, ['1', '1', '1', '1', '1'], classes=['0', '1'],sample_weight=np.ones(len(b), dtype=np.float) * 1)
X_test_labels = X_test_labels0_sort[5:] # min
X_test = a[5:]
if len(X_test)<6:
break
# while 1 :
# p1 = pred_labelsForTrainingUn
        # # Fit the unlabeled data, weighted by the λ parameter, into the classifier
# classifier.partial_fit(X_test, pred_labelsForTrainingUn,classes=['0','1'], sample_weight=np.ones(len(X_test),dtype=np.float)*self.Lambda)
# pred_labelsForTrainingUn = classifier.predict(X_test)
# p2 = pred_labelsForTrainingUn
        # # Check whether the classifier has stabilized
# if list(p1)==list(p2) :
# ClassifierN += 1
# elif ClassifierN > 0:
# ClassifierN = 0
# if ClassifierN == 20:
# break
pred_labels = classifier.predict(self.test)
print('naive_bayes with EM algorithm:')
return pred_labels
```
## Main
```
class SDLib(object):
def __init__(self,config):
self.trainingData = [] # training data
self.testData = [] # testData
self.relation = []
self.measure = []
self.config =config
self.ratingConfig = LineConfig(config['ratings.setup'])
self.labels = FileIO.loadLabels(config['label'])
if self.config.contains('evaluation.setup'):
self.evaluation = LineConfig(config['evaluation.setup'])
if self.evaluation.contains('-testSet'):
#specify testSet
self.trainingData = FileIO.loadDataSet(config, config['ratings'])
self.testData = FileIO.loadDataSet(config, self.evaluation['-testSet'], bTest=True)
elif self.evaluation.contains('-ap'):
#auto partition
self.trainingData = FileIO.loadDataSet(config,config['ratings'])
self.trainingData,self.testData = DataSplit.\
dataSplit(self.trainingData,test_ratio=float(self.evaluation['-ap']))
elif self.evaluation.contains('-cv'):
#cross validation
self.trainingData = FileIO.loadDataSet(config, config['ratings'])
#self.trainingData,self.testData = DataSplit.crossValidation(self.trainingData,int(self.evaluation['-cv']))
else:
print('Evaluation is not well configured!')
exit(-1)
if config.contains('social'):
self.socialConfig = LineConfig(self.config['social.setup'])
self.relation = FileIO.loadRelationship(config,self.config['social'])
print('preprocessing...')
def execute(self):
if self.evaluation.contains('-cv'):
k = int(self.evaluation['-cv'])
if k <= 1 or k > 10:
k = 3
            #create the manager used for communication between the processes
manager = Manager()
m = manager.dict()
i = 1
tasks = []
for train,test in DataSplit.crossValidation(self.trainingData,k):
fold = '['+str(i)+']'
if self.config.contains('social'):
method = self.config['methodName'] + "(self.config,train,test,self.labels,self.relation,fold)"
else:
method = self.config['methodName'] + "(self.config,train,test,self.labels,fold)"
#create the process
p = Process(target=run,args=(m,eval(method),i))
tasks.append(p)
i+=1
#start the processes
for p in tasks:
p.start()
#wait until all processes are completed
for p in tasks:
p.join()
#compute the mean error of k-fold cross validation
self.measure = [dict(m)[i] for i in range(1,k+1)]
res = []
pattern = re.compile('(\d+\.\d+)')
countPattern = re.compile('\d+\\n')
labelPattern = re.compile('\s\d{1}[^\.|\n|\d]')
labels = re.findall(labelPattern, self.measure[0])
values = np.array([0]*9,dtype=float)
count = np.array([0,0,0],dtype=int)
for report in self.measure:
patterns = np.array(re.findall(pattern,report),dtype=float)
values += patterns[:9]
patterncounts = np.array(re.findall(countPattern,report),dtype=int)
count += patterncounts[:3]
values/=k
values=np.around(values,decimals=4)
res.append(' precision recall f1-score support\n\n')
res.append(' '+labels[0]+' '+' '.join(np.array(values[0:3],dtype=str).tolist())+' '+str(count[0])+'\n')
res.append(' '+labels[1]+' '+' '.join(np.array(values[3:6],dtype=str).tolist())+' '+str(count[1])+'\n\n')
res.append(' avg/total ' + ' '.join(np.array(values[6:9], dtype=str).tolist()) + ' ' + str(count[2]) + '\n')
print('Total:')
print(''.join(res))
# for line in lines[1:]:
#
# measure = self.measure[0][i].split(':')[0]
# total = 0
# for j in range(k):
# total += float(self.measure[j][i].split(':')[1])
# res.append(measure+':'+str(total/k)+'\n')
#output result
currentTime = strftime("%Y-%m-%d %H-%M-%S", localtime(time()))
outDir = LineConfig(self.config['output.setup'])['-dir']
fileName = self.config['methodName'] +'@'+currentTime+'-'+str(k)+'-fold-cv' + '.txt'
FileIO.writeFile(outDir,fileName,res)
print('The results have been output to '+abspath(LineConfig(self.config['output.setup'])['-dir'])+'\n')
else:
if self.config.contains('social'):
method = self.config['methodName'] + '(self.config,self.trainingData,self.testData,self.labels,self.relation)'
else:
method = self.config['methodName'] + '(self.config,self.trainingData,self.testData,self.labels)'
eval(method).execute()
def run(measure,algor,order):
measure[order] = algor.execute()
conf = Config('DegreeSAD.conf')
sd = SDLib(conf)
sd.execute()
print('='*80)
print('Supervised Methods:')
print('1. DegreeSAD 2.CoDetector 3.BayesDetector\n')
print('Semi-Supervised Methods:')
print('4. SemiSAD\n')
print('Unsupervised Methods:')
print('5. PCASelectUsers 6. FAP 7.timeIndex\n')
print('-'*80)
order = int(input('please enter the num of the method to run it:'))
algor = -1
conf = -1
s = tm.time()  # time.clock() was removed in Python 3.8; assuming tm is the time module
if order == 1:
conf = Config('DegreeSAD.conf')
elif order == 2:
conf = Config('CoDetector.conf')
elif order == 3:
conf = Config('BayesDetector.conf')
elif order == 4:
conf = Config('SemiSAD.conf')
elif order == 5:
conf = Config('PCASelectUsers.conf')
elif order == 6:
conf = Config('FAP.conf')
elif order == 7:
conf = Config('timeIndex.conf')
else:
print('Error num!')
exit(-1)
# conf = Config('DegreeSAD.conf')
sd = SDLib(conf)
sd.execute()
e = tm.time()
print("Run time: %f s" % (e - s))
```
| github_jupyter |
# Deep learning for Natural Language Processing
* Simple text representations, bag of words
* Word embedding and... not just another word2vec this time
* 1-dimensional convolutions for text
* Aggregating several data sources "the hard way"
* Solving ~somewhat~ real ML problem with ~almost~ end-to-end deep learning
Special thanks to Irina Golzmann for help with the technical part.
# NLTK
You will require nltk v3.2 to solve this assignment
__It is really important that the version is 3.2, otherwise the Russian tokenizer might not work__
Install/update
* `sudo pip install --upgrade nltk==3.2`
* If you don't remember when was the last pip upgrade, `sudo pip install --upgrade pip`
If for some reason you can't or won't switch to nltk v3.2, just make sure that Russian words are tokenized properly with RegexpTokenizer.
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
# Dataset
Ex-kaggle-competition on job salary prediction

Original contest - https://www.kaggle.com/c/job-salary-prediction
### Download
Go [here](https://www.kaggle.com/c/job-salary-prediction) and download as usual
CSC cloud: data should already be here somewhere, just poke the nearest instructor.
# What's inside
Different kinds of features:
* 2 text fields - title and description
* Categorical fields - contract type, location
Only 1 continuous target - the normalized salary (`SalaryNormalized`)
* so this boils down to a simple regression task
* diving into the data may result in prolonged sleep disorders
```
df = pd.read_csv("./Train_rev1.csv",sep=',')
print df.shape, df.SalaryNormalized.mean()
df[:5]
```
# Tokenizing
First, we create a dictionary of all existing words.
Assign each word a number - its Id
```
from nltk.tokenize import RegexpTokenizer
from collections import Counter,defaultdict
tokenizer = RegexpTokenizer(r"\w+")
#Dictionary of tokens
token_counts = Counter()
#All texts
all_texts = np.hstack([df.FullDescription.values,df.Title.values])
#Compute token frequencies
for s in all_texts:
if type(s) is not str:
continue
s = s.decode('utf8').lower()
tokens = tokenizer.tokenize(s)
for token in tokens:
token_counts[token] +=1
```
### Remove rare tokens
We are unlikely to make use of words that are only seen a few times throughout the corpora.
Again, if you want to beat Kaggle competition metrics, consider doing something better.
```
#Word frequency distribution, just for kicks
_=plt.hist(token_counts.values(),range=[0,50],bins=50)
#Select only the tokens that occur at least min_count times in the corpus.
#Use token_counts.
min_count = 5
tokens = <tokens from token_counts keys that had at least min_count occurences throughout the dataset>
token_to_id = {t:i+1 for i,t in enumerate(tokens)}
null_token = "NULL"
token_to_id[null_token] = 0
print "# Tokens:",len(token_to_id)
if len(token_to_id) < 10000:
print "Alarm! It seems like there are too few tokens. Make sure you updated NLTK and applied correct thresholds -- unless you now what you're doing, ofc"
if len(token_to_id) > 100000:
print "Alarm! Too many tokens. You might have messed up when pruning rare ones -- unless you know what you're doin' ofc"
```
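For reference, here is one way the placeholder above could be filled in (a sketch - it simply keeps every token that reaches the `min_count` threshold):
```
#One possible fill-in for the placeholder above
tokens = [token for token, count in token_counts.items() if count >= min_count]
```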
### Replace words with IDs
Set a maximum length for titles and descriptions.
* If a string is longer than that limit - crop it, if shorter - pad with zeros.
* Thus we obtain a matrix of size [n_samples]x[max_length]
* Element at i,j - is an identifier of word j within sample i
```
def vectorize(strings, token_to_id, max_len=150):
token_matrix = []
for s in strings:
if type(s) is not str:
token_matrix.append([0]*max_len)
continue
s = s.decode('utf8').lower()
tokens = tokenizer.tokenize(s)
token_ids = map(lambda token: token_to_id.get(token,0), tokens)[:max_len]
token_ids += [0]*(max_len - len(token_ids))
token_matrix.append(token_ids)
return np.array(token_matrix)
desc_tokens = vectorize(df.FullDescription.values,token_to_id,max_len = 500)
title_tokens = vectorize(df.Title.values,token_to_id,max_len = 15)
```
### Data format examples
```
print "Matrix size:",title_tokens.shape
for title, tokens in zip(df.Title.values[:3],title_tokens[:3]):
print title,'->', tokens[:10],'...'
```
__As you can see, our preprocessing is somewhat crude. Let us see if that is enough for our network.__
# Non-sequences
Some data features are categorical data. E.g. location, contract type, company
They require a separate preprocessing step.
```
#One-hot-encoded category and subcategory
from sklearn.feature_extraction import DictVectorizer
categories = []
data_cat = df[["Category","LocationNormalized","ContractType","ContractTime"]]
categories = [A list of dictionaries {"category":category_name, "subcategory":subcategory_name} for each data sample]
vectorizer = DictVectorizer(sparse=False)
df_non_text = vectorizer.fit_transform(categories)
df_non_text = pd.DataFrame(df_non_text,columns=vectorizer.feature_names_)
```
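One possible way to build the list of per-sample dictionaries for `DictVectorizer` (a sketch; filling missing categories with the string "NA" is an arbitrary choice):
```
#One possible fill-in for the categories placeholder above:
#each row of data_cat becomes a dict like {"Category": ..., "LocationNormalized": ..., ...}
categories = data_cat.fillna("NA").to_dict(orient="records")
```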
# Split data into training and test
```
#Target variable - the normalized salary we want to predict
target = df.SalaryNormalized.values.astype('float32')
#Preprocessed titles
title_tokens = title_tokens.astype('int32')
#Preprocessed tokens
desc_tokens = desc_tokens.astype('int32')
#Non-sequences
df_non_text = df_non_text.astype('float32')
#Split into training and test set.
#Difficulty selector:
#Easy: split randomly
#Medium: split by companies, make sure no company is in both train and test set
#Hard: do whatever you want, but score yourself using kaggle private leaderboard
title_tr,title_ts,desc_tr,desc_ts,nontext_tr,nontext_ts,target_tr,target_ts = <define_these_variables>
```
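A minimal sketch of the "easy" option - a purely random 80/20 split (the ratio and the random seed are arbitrary choices):
```
#"Easy" difficulty: a purely random 80/20 split
np.random.seed(42)
perm = np.random.permutation(len(target))
n_train = int(0.8 * len(target))
tr_idx, ts_idx = perm[:n_train], perm[n_train:]
nontext = df_non_text.values
title_tr, title_ts = title_tokens[tr_idx], title_tokens[ts_idx]
desc_tr, desc_ts = desc_tokens[tr_idx], desc_tokens[ts_idx]
nontext_tr, nontext_ts = nontext[tr_idx], nontext[ts_idx]
target_tr, target_ts = target[tr_idx], target[ts_idx]
```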
## Save preprocessed data [optional]
* The next cell can be used to stash all the essential data matrices and get rid of the rest of the data.
* Highly recommended if you have less than 1.5GB RAM left
* To do that, you need to first run it with save_prepared_data=True, then restart the notebook and only run this cell with read_prepared_data=True.
```
save_prepared_data = True #save
read_prepared_data = False #load
#but not both at once
assert not (save_prepared_data and read_prepared_data)
if save_prepared_data:
    print "Saving preprocessed data (may take up to 3 minutes)"
    import pickle
    #pack everything we need into one tuple before pickling it
    data_tuple = (title_tr,title_ts,desc_tr,desc_ts,nontext_tr,nontext_ts,target_tr,target_ts)
    with open("preprocessed_data.pcl",'w') as fout:
        pickle.dump(data_tuple,fout)
with open("token_to_id.pcl",'w') as fout:
pickle.dump(token_to_id,fout)
print "done"
elif read_prepared_data:
print "Reading saved data..."
import pickle
with open("preprocessed_data.pcl",'r') as fin:
data_tuple = pickle.load(fin)
title_tr,title_ts,desc_tr,desc_ts,nontext_tr,nontext_ts,target_tr,target_ts = data_tuple
with open("token_to_id.pcl",'r') as fin:
token_to_id = pickle.load(fin)
#Re-importing libraries to allow starting the notebook from here
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
print "done"
```
# Train the monster
Since we have several data sources, our neural network may differ from the architectures you are used to working with.
* Separate input for titles
* cnn+global max or RNN
* Separate input for description
* cnn+global max or RNN
* Separate input for categorical features
* Few dense layers + some black magic if you want
These three inputs must be blended somehow - concatenated or added.
* Output: a simple regression task
```
#libraries
import lasagne
from theano import tensor as T
import theano
#3 inputs and a reference output
title_token_ids = T.matrix("title_token_ids",dtype='int32')
desc_token_ids = T.matrix("desc_token_ids",dtype='int32')
categories = T.matrix("categories",dtype='float32')
target_y = T.vector("is_blocked",dtype='float32')
```
# NN architecture
```
title_inp = lasagne.layers.InputLayer((None,title_tr.shape[1]),input_var=title_token_ids)
descr_inp = lasagne.layers.InputLayer((None,desc_tr.shape[1]),input_var=desc_token_ids)
cat_inp = lasagne.layers.InputLayer((None,nontext_tr.shape[1]), input_var=categories)
# Descriptions
#word-wise embedding. We recommend starting from around 64 and increasing it after you are certain everything works.
descr_nn = lasagne.layers.EmbeddingLayer(descr_inp,
input_size=len(token_to_id)+1,
output_size=?)
#reshape from [batch, time, unit] to [batch,unit,time] to allow 1d convolution over time
descr_nn = lasagne.layers.DimshuffleLayer(descr_nn, [0,2,1])
descr_nn = 1D convolution over embedding, maybe several ones in a stack
#pool over time
descr_nn = lasagne.layers.GlobalPoolLayer(descr_nn,T.max)
#Possible improvements here are adding several parallel convs with different filter sizes or stacking them the usual way
#1dconv -> 1d max pool ->1dconv and finally global pool
# Titles
title_nn = <Process titles somehow (title_inp)>
# Non-sequences
cat_nn = <Process non-sequences(cat_inp)>
nn = <merge three layers into one (e.g. lasagne.layers.concat) >
nn = lasagne.layers.DenseLayer(nn,your_lucky_number)
nn = lasagne.layers.DropoutLayer(nn,p=maybe_use_me)
nn = lasagne.layers.DenseLayer(nn,1,nonlinearity=lasagne.nonlinearities.linear)
```
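The cell above deliberately leaves several parts as placeholders. For reference, below is one minimal way the three branches could be wired together; the embedding size, number of filters, filter size and dense-layer width are arbitrary starting values, not tuned recommendations.
```
#A sketch of one possible architecture (all sizes are arbitrary starting points)
#description branch: embedding -> 1d conv over time -> global max pool
descr_nn = lasagne.layers.EmbeddingLayer(descr_inp, input_size=len(token_to_id)+1, output_size=64)
descr_nn = lasagne.layers.DimshuffleLayer(descr_nn, [0,2,1])
descr_nn = lasagne.layers.Conv1DLayer(descr_nn, num_filters=64, filter_size=3,
                                      nonlinearity=lasagne.nonlinearities.rectify)
descr_nn = lasagne.layers.GlobalPoolLayer(descr_nn, T.max)
#title branch: same recipe, just shorter sequences
title_nn = lasagne.layers.EmbeddingLayer(title_inp, input_size=len(token_to_id)+1, output_size=64)
title_nn = lasagne.layers.DimshuffleLayer(title_nn, [0,2,1])
title_nn = lasagne.layers.Conv1DLayer(title_nn, num_filters=64, filter_size=3,
                                      nonlinearity=lasagne.nonlinearities.rectify)
title_nn = lasagne.layers.GlobalPoolLayer(title_nn, T.max)
#categorical branch: a single dense layer
cat_nn = lasagne.layers.DenseLayer(cat_inp, 64, nonlinearity=lasagne.nonlinearities.rectify)
#merge the three branches and finish with the same dense head as in the cell above
nn = lasagne.layers.concat([descr_nn, title_nn, cat_nn])
nn = lasagne.layers.DenseLayer(nn, 128)
nn = lasagne.layers.DropoutLayer(nn, p=0.5)
nn = lasagne.layers.DenseLayer(nn, 1, nonlinearity=lasagne.nonlinearities.linear)
```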
# Loss function
* The standard way:
* prediction
* loss
* updates
* training and evaluation functions
```
#All trainable params
weights = lasagne.layers.get_all_params(nn,trainable=True)
#Simple NN prediction
prediction = lasagne.layers.get_output(nn)[:,0]
#loss function
loss = lasagne.objectives.squared_error(prediction,target_y).mean()
#Weight optimization step
updates = <your favorite optimizer>
```
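For the optimizer placeholder, any of the update rules in `lasagne.updates` will do; for example (the learning rate is an arbitrary choice):
```
#e.g. adam over all trainable weights
updates = lasagne.updates.adam(loss, weights, learning_rate=1e-3)
```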
### Deterministic prediction
* In case we use stochastic elements, e.g. dropout or noise
* Compile a separate set of functions with deterministic prediction (deterministic = True)
* Unless you think there's no need for dropout there ofc. Btw is there?
```
#deterministic version
det_prediction = lasagne.layers.get_output(nn,deterministic=True)[:,0]
#equivalent loss function
det_loss = <an exercise in copy-pasting and editing>
```
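One way to fill in the placeholder above - the deterministic loss simply mirrors the training loss, built from `det_prediction` instead:
```
det_loss = lasagne.objectives.squared_error(det_prediction,target_y).mean()
```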
### Coffee-lation
```
train_fun = theano.function([desc_token_ids,title_token_ids,categories,target_y],[loss,prediction],updates = updates)
eval_fun = theano.function([desc_token_ids,title_token_ids,categories,target_y],[det_loss,det_prediction])
```
# Training loop
* The regular way with loops over minibatches
* Since the dataset is huge, we define an epoch as some fixed amount of samples instead of the whole dataset
```
# Our good old minibatch iterator now supports an arbitrary amount of arrays (X,y,z)
def iterate_minibatches(*arrays,**kwargs):
batchsize=kwargs.get("batchsize",100)
shuffle = kwargs.get("shuffle",True)
if shuffle:
indices = np.arange(len(arrays[0]))
np.random.shuffle(indices)
for start_idx in range(0, len(arrays[0]) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield [arr[excerpt] for arr in arrays]
```
### Tweaking guide
* batch_size - how many samples are processed per function call
* optimization gets slower, but more stable, as you increase it.
* May consider increasing it halfway through training
* minibatches_per_epoch - max amount of minibatches per epoch
* Does not affect training. Lesser value means more frequent and less stable printing
* Setting it to less than 10 is only meaningfull if you want to make sure your NN does not break down after one epoch
* n_epochs - total amount of epochs to train for
* `n_epochs = 10**10` and manual interrupting is still an option
Tips:
* With small minibatches_per_epoch, network quality may jump up and down for several epochs
* Plotting metrics over training time may be a good way to analyze which architectures work better.
* Once you are sure your network ain't gonna crash, it's worth letting it train for a few hours of an average laptop's time to see its true potential
```
from sklearn.metrics import mean_squared_error,mean_absolute_error
n_epochs = 100
batch_size = 100
minibatches_per_epoch = 100
for i in range(n_epochs):
#training
epoch_y_true = []
epoch_y_pred = []
b_c = b_loss = 0
for j, (b_desc,b_title,b_cat, b_y) in enumerate(
iterate_minibatches(desc_tr,title_tr,nontext_tr,target_tr,batchsize=batch_size,shuffle=True)):
if j > minibatches_per_epoch:break
loss,pred_probas = train_fun(b_desc,b_title,b_cat,b_y)
b_loss += loss
b_c +=1
epoch_y_true.append(b_y)
epoch_y_pred.append(pred_probas)
epoch_y_true = np.concatenate(epoch_y_true)
epoch_y_pred = np.concatenate(epoch_y_pred)
print "Train:"
print '\tloss:',b_loss/b_c
print '\trmse:',mean_squared_error(epoch_y_true,epoch_y_pred)**.5
print '\tmae:',mean_absolute_error(epoch_y_true,epoch_y_pred)
#evaluation
epoch_y_true = []
epoch_y_pred = []
b_c = b_loss = 0
for j, (b_desc,b_title,b_cat, b_y) in enumerate(
iterate_minibatches(desc_ts,title_ts,nontext_ts,target_ts,batchsize=batch_size,shuffle=True)):
if j > minibatches_per_epoch: break
loss,pred_probas = eval_fun(b_desc,b_title,b_cat,b_y)
b_loss += loss
b_c +=1
epoch_y_true.append(b_y)
epoch_y_pred.append(pred_probas)
epoch_y_true = np.concatenate(epoch_y_true)
epoch_y_pred = np.concatenate(epoch_y_pred)
print "Val:"
print '\tloss:',b_loss/b_c
print '\trmse:',mean_squared_error(epoch_y_true,epoch_y_pred)**.5
print '\tmae:',mean_absolute_error(epoch_y_true,epoch_y_pred)
print "If you are seeing this, it's time to backup your notebook. No, really, 'tis too easy to mess up everything without noticing. "
```
# Final evaluation
Evaluate network over the entire test set
```
#evaluation
epoch_y_true = []
epoch_y_pred = []
b_c = b_loss = 0
for j, (b_desc,b_title,b_cat, b_y) in enumerate(
iterate_minibatches(desc_ts,title_ts,nontext_ts,target_ts,batchsize=batch_size,shuffle=True)):
loss,pred_probas = eval_fun(b_desc,b_title,b_cat,b_y)
b_loss += loss
b_c +=1
epoch_y_true.append(b_y)
epoch_y_pred.append(pred_probas)
epoch_y_true = np.concatenate(epoch_y_true)
epoch_y_pred = np.concatenate(epoch_y_pred)
print "Scores:"
print '\tloss:',b_loss/b_c
print '\trmse:',mean_squared_error(epoch_y_true,epoch_y_pred)**.5
print '\tmae:',mean_absolute_error(epoch_y_true,epoch_y_pred)
```
Now tune the monster for least MSE you can get!
# Next time in our show
* Recurrent neural networks
* How to apply them to practical problems?
* What else can they do?
* Why so much hype around LSTM?
* Stay tuned!
| github_jupyter |
## Loading libraries and looking at given data
```
import numpy as np
import pandas as pd
import seaborn as sns
import re
appendix_3=pd.read_excel("Appendix_3_august.xlsx")
appendix_3
print(appendix_3["Language"].value_counts(),)
print(appendix_3["Country"].value_counts())
pd.set_option("display.max_rows", None, "display.max_columns", None)
print(appendix_3['Country'].to_string(index=False))
```
## Removing useless data
```
appendix_3=appendix_3[appendix_3.Language!="Københavnsk"]
appendix_3=appendix_3.drop(["Meaningless_ID"], axis=1)
appendix_3
appendix_3=appendix_3[appendix_3.Licenses!=0]
appendix_3
```
## Mapping customer languages to support languages
```
def language(var):
"""Function that returns languages spoken by 3Shapes present support teams.
If not spoken, return English"""
if var.lower() in ['english','american']: #If english or "american"
return 'English' #Return English
if var.lower() in ['spanish']:
return 'Spanish'
if var.lower() in ['french']:
return 'French'
if var.lower() in ['german']:
return 'German'
if var.lower() in ['russian']:
return 'Russian'
if var.lower() in ['portuguese']:
return 'Portuguese'
if var.lower() in ['italian']:
return 'Italian'
if re.search('chin.+', var.lower()): # If lettercombination 'chin' appears:
return 'Chinese' # Return 'Chinese'
if var.lower() in ['japanese']:
return 'Japanese'
if var.lower() in ['korean']:
return 'Korean'
else:
return 'English' #If not spoken, return English
appendix_3['Support_language'] = appendix_3['Language'].apply(language)
appendix_3['Support_language'].value_counts()
appendix_3["Licenses_per_language"]=appendix_3.groupby(["Support_language"])["Licenses"].transform("sum")
appendix_3['Country'] = appendix_3['Country'].str.strip() #Removing initial whitespace
appendix_3.iloc[1,0]
```
## Making a column that "groups" countries into 3 regions/timezones of the world (Americas, Europe (incl. Middle East and Africa) and Asia)
```
def region(var):
"""Function that returns region based on country"""
if var in ['United States','Canada','Brazil','Mexico','Colombia','Argentina','Uruguay',
'Costa Rica','Chile','Paraguay','Bolivia','Venezuela','Puerto Rico']:
return 'Americas'
if var in ['France','Italy','Germany','United Kingdom','Spain','Netherlands','Ireland','Poland',
'Denmark','Switzerland','United Arab Emirates','Sweden','Norway','Belgium','Austria',
'Lebanon','Israel','Slovakia','Greece','Romania','Turkey','Czech Republic','South Africa',
'Finland','Lithuania','Russia','Hungary','Ukraine','Pakistan','Croatia','Iceland','Morocco',
'Egypt','Kuwait','Bulgaria','Iran','Luxembourg','Serbia','Slovenia','Tunisia','Estonia',
'Saudi Arabia','Portugal','Jordan','Cyprus','Armenia','Moldova','Azerbaijan','Algeria',
'Monaco','Georgia','Iraq','Liechtenstein','Latvia']:
return 'Europe'
if var in ['Korea','Australia','China','Singapore','Taiwan','Thailand','India','Japan',
'Hong Kong SAR','Vietnam','New Zealand','Philippines','Indonesia','Myanmar',
'Malaysia','Nepal']:
return 'Asia'
else:
return 'No'
appendix_3['Region'] = appendix_3['Country'].apply(region)
appendix_3['Region'].head(6)
appendix_3["Licenses_per_region"]=appendix_3.groupby(["Region"])["Licenses"].transform("sum")
appendix_3[["Licenses_per_region","Region"]].head(6)
```
## New DataFrame with our three regions/support centers
```
New_regions=appendix_3.groupby(["Region"])["Licenses"].sum().sort_values(ascending=False).to_frame().reset_index()
New_regions
def employees_needed(var):
""" Function that gives number of recuired employees based on licenses"""
if var <300:
return 3
else:
return np.ceil((var-300)/200+3)
New_regions["Employ_needed"]=New_regions["Licenses"].apply(employees_needed)
New_regions.head(3)
New_regions["Revenue"]=New_regions["Licenses"]*2000
New_regions.head(3)
```
## Looking at appendix 2, cleaning useless data, and converting to int
```
appendix_2=pd.read_excel("Appendix_2_august.xlsx")
appendix_2
appendix_2=appendix_2.drop([5])
appendix_2
appendix_2['Total cost']=appendix_2['Total cost'].astype(int)
appendix_2['Average FTE']=appendix_2['Average FTE'].astype(int)
print(appendix_2.dtypes)
```
## Getting the cost per worker per support center
```
appendix_2["Cost_per_FTE"]=np.round(appendix_2["Total cost"]/appendix_2["Average FTE"])
appendix_2
```
## Because of trouble with merge, the values are transferred manually to the new DataFrame
```
def regional_center(var):
""" Quick function that gives the location of support center"""
if var in ['Europe']:
return 'Ukraine'
if var in ['Americas']:
return 'USA'
if var in ['Asia']:
return 'China'
New_regions["Support Center"]=New_regions["Region"].apply(regional_center)
New_regions.head(3)
New_regions['Cost per FTE']=[17105,83333,250000]
New_regions
```
## Altering the order of the columns to a more intuitive layout
```
print(list(New_regions.columns.values)) #
New_regions=New_regions[['Support Center','Region', 'Licenses','Revenue','Employ_needed','Cost per FTE','Total cost']]
New_regions
```
## Calculating cost and balance values
```
New_regions['Total cost']=New_regions['Employ_needed']*New_regions['Cost per FTE']
New_regions
New_regions=New_regions.assign(Balance=New_regions['Revenue'] - New_regions['Total cost'])
New_regions
```
## Making a new DataFrame for the whole project
```
Whole_project=pd.DataFrame()
Whole_project['Licenses']=[New_regions['Licenses'].sum(axis=0)]
Whole_project['Revenue']=[New_regions['Revenue'].sum(axis=0)]
Whole_project['Employ_needed']=[New_regions['Employ_needed'].sum(axis=0)]
Whole_project['Total cost']=[New_regions['Total cost'].sum(axis=0)]
Whole_project['Balance']=[New_regions['Balance'].sum(axis=0)]
Whole_project
Whole_project['Balance before']=(appendix_3['Licenses'].sum(axis=0)*2000*0.7)-appendix_2['Total cost'].sum(axis=0)
Whole_project['Gain']=Whole_project['Balance']-Whole_project['Balance before']
Whole_project
Whole_project['Balance + savings']=Whole_project['Balance']+(appendix_2.iloc[0]['Total cost']+appendix_2.iloc[3]['Total cost'])
Whole_project['Gain + savings']=Whole_project['Balance + savings']-Whole_project['Balance before']
Whole_project
```
## Looking at the 3-year forecast with different adoption rates
First off is 10% adoption rate, then 50%, and finally 100%
```
def adoption(df_out_name,df_in_name,adoption_rate):
""" A function that takes an adoption rate as input, and calculates usefull parameters
(licenses, revenue, employees needed, cost and balance) after 3 years.
An annual growth rate of 10% is given """
df_in_name[f'{adoption_rate} adoption, licenses']=round(df_in_name['Licenses']*(1.1**3)*adoption_rate)
df_in_name[f'{adoption_rate} adoption, revenue']=df_in_name[f'{adoption_rate} adoption, licenses']*2000
df_in_name[f'{adoption_rate} adoption, employ_needed']=np.ceil((df_in_name[f'{adoption_rate} adoption, licenses']-300)/200+3)
df_in_name[f'{adoption_rate} adoption, total cost']=round(df_in_name[f'{adoption_rate} adoption, employ_needed']*((New_regions.iloc[0,5]*New_regions.iloc[0,4])+(New_regions.iloc[1,5]*New_regions.iloc[1,4])+(New_regions.iloc[2,5]*New_regions.iloc[2,4]))/New_regions['Employ_needed'].sum())
df_in_name[f'{adoption_rate} adoption, balance']=df_in_name[f'{adoption_rate} adoption, revenue']-df_in_name[f'{adoption_rate} adoption, total cost']
df_out_name=df_in_name
return df_out_name
adoption('Whole_project_10',Whole_project,0.1)
adoption('Whole_project_50',Whole_project,0.5)
adoption('Whole_project_50',Whole_project,1)
with pd.ExcelWriter('samlet.xlsx') as writer:
appendix_3.to_excel(writer, sheet_name='Lande,sprog og licenser')
appendix_2.to_excel(writer, sheet_name='Supportcenter og omkostninger')
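    # license_country (licenses per support language) is assumed to be defined in an earlier cell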
license_country.to_excel(writer, sheet_name='Licenser pr. supportsprog')
with pd.ExcelWriter('samlet_2.xlsx') as writer:
New_regions.to_excel(writer, sheet_name='De tre supportcentre')
Whole_project.to_excel(writer, sheet_name='Hele projektet')
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
#How to train Boosted Trees models in TensorFlow
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/estimators/boosted_trees"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/estimators/boosted_trees.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/estimators/boosted_trees.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">View source on GitHub</a>
</td>
</table>
This tutorial is an end-to-end walkthrough of training a Gradient Boosting model using decision trees with the `tf.estimator` API. Boosted Trees models are among the most popular and effective machine learning approaches for both regression and classification. It is an ensemble technique that combines the predictions from several (think 10s, 100s or even 1000s) tree models.
Boosted Trees models are popular with many machine learning practitioners as they can achieve impressive performance with minimal hyperparameter tuning.
## Load the titanic dataset
You will be using the titanic dataset, where the (rather morbid) goal is to predict passenger survival, given characteristics such as gender, age, class, etc.
```
from __future__ import absolute_import, division, print_function, unicode_literals
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
tf.enable_eager_execution()
tf.logging.set_verbosity(tf.logging.ERROR)
tf.set_random_seed(123)
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tfbt/titanic_train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tfbt/titanic_eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
```
The dataset consists of a training set and an evaluation set:
* `dftrain` and `y_train` are the *training set*—the data the model uses to learn.
* The model is tested against the *eval set*, `dfeval`, and `y_eval`.
For training you will use the following features:
<table>
<tr>
<th>Feature Name</th>
<th>Description</th>
</tr>
<tr>
<td>sex</td>
<td>Gender of passenger</td>
</tr>
<tr>
<td>age</td>
<td>Age of passenger</td>
</tr>
<tr>
<td>n_siblings_spouses</td>
<td># siblings and partners aboard</td>
</tr>
<tr>
<td>parch</td>
<td># of parents and children aboard</td>
</tr>
<tr>
<td>fare</td>
<td>Fare passenger paid.</td>
</tr>
<tr>
<td>class</td>
<td>Passenger's class on ship</td>
</tr>
<tr>
<td>deck</td>
<td>Which deck passenger was on</td>
</tr>
<tr>
<td>embark_town</td>
<td>Which town passenger embarked from</td>
</tr>
<tr>
<td>alone</td>
<td>If passenger was alone</td>
</tr>
</table>
## Explore the data
Let's first preview some of the data and create summary statistics on the training set.
```
dftrain.head()
dftrain.describe()
```
There are 627 and 264 examples in the training and evaluation sets, respectively.
```
dftrain.shape[0], dfeval.shape[0]
```
The majority of passengers are in their 20's and 30's.
```
dftrain.age.hist(bins=20)
plt.show()
```
There are approximately twice as many male passengers as female passengers aboard.
```
dftrain.sex.value_counts().plot(kind='barh')
plt.show()
```
The majority of passengers were in the "third" class.
```
(dftrain['class']
.value_counts()
.plot(kind='barh'))
plt.show()
```
Most passengers embarked from Southampton.
```
(dftrain['embark_town']
.value_counts()
.plot(kind='barh'))
plt.show()
```
Females have a much higher chance of surviving vs. males. This will clearly be a predictive feature for the model.
```
ax = (pd.concat([dftrain, y_train], axis=1)\
.groupby('sex')
.survived
.mean()
.plot(kind='barh'))
ax.set_xlabel('% survive')
plt.show()
```
## Create feature columns and input functions
The Gradient Boosting estimator can utilize both numeric and categorical features. Feature columns work with all TensorFlow estimators and their purpose is to define the features used for modeling. Additionally they provide some feature engineering capabilities like one-hot-encoding, normalization, and bucketization. In this tutorial, the fields in `CATEGORICAL_COLUMNS` are transformed from categorical columns to one-hot-encoded columns ([indicator column](https://www.tensorflow.org/api_docs/python/tf/feature_column/indicator_column)):
```
fc = tf.feature_column
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return fc.indicator_column(
fc.categorical_column_with_vocabulary_list(feature_name,
vocab))
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(fc.numeric_column(feature_name,
dtype=tf.float32))
```
You can view the transformation that a feature column produces. For example, here is the output when using the `indicator_column` on a single example:
```
example = dftrain.head(1)
class_fc = one_hot_cat_column('class', ('First', 'Second', 'Third'))
print('Feature value: "{}"'.format(example['class'].iloc[0]))
print('One-hot encoded: ', fc.input_layer(dict(example), [class_fc]).numpy())
```
Additionally, you can view all of the feature column transformations together:
```
fc.input_layer(dict(example), feature_columns).numpy()
```
Next you need to create the input functions. These will specify how data will be read into our model for both training and inference. You will use the `from_tensor_slices` method in the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API to read in data directly from Pandas. This is suitable for smaller, in-memory datasets. For larger datasets, the tf.data API supports a variety of file formats (including [csv](https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset)) so that you can process datasets that do not fit in memory.
```
# Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)
def make_input_fn(X, y, n_epochs=None, shuffle=True):
y = np.expand_dims(y, axis=1)
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))
if shuffle:
dataset = dataset.shuffle(NUM_EXAMPLES)
    # For training, cycle through the dataset as many times as needed (n_epochs=None).
dataset = dataset.repeat(n_epochs)
# In memory training doesn't use batching.
dataset = dataset.batch(NUM_EXAMPLES)
return dataset
return input_fn
# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
```
## Train and evaluate the model
Below you will do the following steps:
1. Initialize the model, specifying the features and hyperparameters.
2. Feed the training data to the model using the `train_input_fn` and train the model using the `train` function.
3. You will assess model performance using the evaluation set—in this example, the `dfeval` DataFrame. You will verify that the predictions match the labels from the `y_eval` array.
Before training a Boosted Trees model, let's first train a linear classifier (logistic regression model). It is best practice to start with a simpler model to establish a benchmark.
```
linear_est = tf.estimator.LinearClassifier(feature_columns)
# Train model.
linear_est.train(train_input_fn, max_steps=100)
# Evaluation.
results = linear_est.evaluate(eval_input_fn)
print('Accuracy : ', results['accuracy'])
print('Dummy model: ', results['accuracy_baseline'])
```
Next let's train a Boosted Trees model. For boosted trees, regression (`BoostedTreesRegressor`) and classification (`BoostedTreesClassifier`) are supported, along with using any twice differentiable custom loss (`BoostedTreesEstimator`). Since the goal is to predict a class - survive or not survive, you will use the `BoostedTreesClassifier`.
```
# Since data fits into memory, use entire dataset per layer. It will be faster.
# Above one batch is defined as the entire dataset.
n_batches = 1
est = tf.estimator.BoostedTreesClassifier(feature_columns,
n_batches_per_layer=n_batches)
# The model will stop training once the specified number of trees is built, not
# based on the number of steps.
est.train(train_input_fn, max_steps=100)
# Eval.
results = est.evaluate(eval_input_fn)
print('Accuracy : ', results['accuracy'])
print('Dummy model: ', results['accuracy_baseline'])
```
For performance reasons, when your data fits in memory, it is recommended to use the `boosted_trees_classifier_train_in_memory` function. However, if training time is not a concern, or if you have a very large dataset and want to do distributed training, use the `tf.estimator.BoostedTrees` API shown above.
When using this method, you should not batch your input data, as the method operates on the entire dataset.
```
def make_inmemory_train_input_fn(X, y):
y = np.expand_dims(y, axis=1)
def input_fn():
return dict(X), y
return input_fn
train_input_fn = make_inmemory_train_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
est = tf.contrib.estimator.boosted_trees_classifier_train_in_memory(
train_input_fn,
feature_columns)
print(est.evaluate(eval_input_fn)['accuracy'])
```
Now you can use the trained model to make predictions on a passenger from the evaluation set. TensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. Earlier, the `eval_input_fn` was defined using the entire evaluation set.
```
pred_dicts = list(est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities')
plt.show()
```
Finally you can also look at the receiver operating characteristic (ROC) of the results, which will give us a better idea of the tradeoff between the true positive rate and false positive rate.
```
from sklearn.metrics import roc_curve
fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,)
plt.show()
```
| github_jupyter |
<table class="ee-notebook-buttons" align="left">
<td><a target="_parent" href="https://github.com/giswqs/geemap/tree/master/tutorials/ImageCollection/01_image_collection_overview.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_parent" href="https://nbviewer.jupyter.org/github/giswqs/geemap/blob/master/tutorials/ImageCollection/01_image_collection_overview.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_parent" href="https://colab.research.google.com/github/giswqs/geemap/blob/master/tutorials/ImageCollection/01_image_collection_overview.ipynb"><img width=26px src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
# ImageCollection Overview
An `ImageCollection` is a stack or time series of images. In addition to loading an `ImageCollection` using an Earth Engine collection ID, Earth Engine has methods to create image collections. The constructor `ee.ImageCollection()` or the convenience method `ee.ImageCollection.fromImages()` create image collections from lists of images. You can also create new image collections by merging existing collections. For example:
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.foliumap`](https://github.com/giswqs/geemap/blob/master/geemap/foliumap.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
```
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.foliumap as emap
except:
import geemap as emap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
```
## Create an interactive map
The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function.
```
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map
```
## Add Earth Engine Python script
```
# Create arbitrary constant images.
constant1 = ee.Image(1)
constant2 = ee.Image(2)
# Create a collection by giving a list to the constructor.
collectionFromConstructor = ee.ImageCollection([constant1, constant2])
print('collectionFromConstructor: ', collectionFromConstructor.getInfo())
# Create a collection with fromImages().
collectionFromImages = ee.ImageCollection.fromImages(
[ee.Image(3), ee.Image(4)])
print('collectionFromImages: ', collectionFromImages.getInfo())
# Merge two collections.
mergedCollection = collectionFromConstructor.merge(collectionFromImages)
print('mergedCollection: ', mergedCollection.getInfo())
# Create an ee.Geometry.
polygon = ee.Geometry.Polygon([
[[-35, -10], [35, -10], [35, 10], [-35, 10], [-35, -10]]
])
# Create a toy FeatureCollection
features = ee.FeatureCollection(
[ee.Feature(polygon, {'foo': 1}), ee.Feature(polygon, {'foo': 2})])
print(features.getInfo())
# Create an ImageCollection from the FeatureCollection
# by mapping a function over the FeatureCollection.
images = features.map(lambda feature: ee.Image(ee.Number(feature.get('foo'))))
# Print the resultant collection.
print('Image collection: ', images.getInfo())
```
## Display Earth Engine data layers
```
Map.addLayerControl()
Map
```
Note that in this example an `ImageCollection` is created by mapping a function that returns an `Image` over a `FeatureCollection`. Learn more about mapping in the [Mapping over an ImageCollection section](https://developers.google.com/earth-engine/ic_mapping.html). Learn more about feature collections from the [FeatureCollection section](https://developers.google.com/earth-engine/feature_collections.html).
| github_jupyter |
# Regarding this Notebook
This is a replication of the original analysis performed in the paper by [Waade & Enevoldsen 2020](missing). This replication script will not be updated as it is intended for reproducibility. Any deviations from the paper are marked in bold for transparency.
Footnotes and internal documentation references are removed from this example to avoid confusion.
---
# 2.2 Using tomsup
One of the advantages of computational models of cognitive processes is that the implications of the model can be worked out by simulating the model's behavior in a variety of situations. tomsup, in particular, allows testing the k-ToM model as it plays a wide set of game-theoretical situations (e.g. Matching Pennies or Prisoner's Dilemma), in interaction with a variety of different agents (e.g. other k-ToM or less sophisticated agents), within different possible settings (e.g. repeated interactions with the same opponent, or round robin tournaments). In order to better understand the setup of the tomsup package, we start with the case of two simple agents interacting, followed by a simple example using k-ToM agents, which will also illustrate how one might implement tomsup in an experiment. Lastly, we will show how to run a simulation using multiple agents as well as how to plot the evolving internal states of a k-ToM agent. In this simple scenario two agents are playing the Matching Pennies game. One agent hides a penny in one hand: let's say it chooses 0 for hiding it in the left hand, and 1 for the right. The other agent has to guess where the penny is. If the second agent guesses correctly (chooses the same hand as the first), it wins and the first loses. In other words, the first agent wants to choose the hand that the second will not choose, and the second wants to choose the hand that the first chooses. In this example, one of the agents implements the Random Bias strategy (e.g. has a 60 percent probability of choosing right over left), while the other implements a classic Q-learning strategy (a model-free reinforcement learning mechanism updating the expected reward of choosing a specific option on a trial by trial basis). The full list of strategies already implemented in tomsup is accessible using the function `valid_agents()`. The user first has to install the tomsup package, developed using Python 3.6 (Van Rossum & Drake, 2009). The package can be downloaded and installed using pip:
```pip3 install tomsup```
**However, in this notebook we will assume the user simply downloaded the git. Feel free to skip the next code chunk if that is not the case.**
```
# assuming you are in the GitHub folder, change the path - not relevant if tomsup is installed via pip
import os
os.chdir("..") # go out of the tutorials folder
```
Both approaches will also install the required dependencies. Now tomsup can be imported into Python with the following line:
```
import tomsup as ts
```
We will also set an arbitrary seed to ensure reproducibility:
```
import random
import numpy as np
np.random.seed(1995)
random.seed(1995) # The year of birth of the first author
```
First we need to set up the Matching Pennies game. As different games are defined by different payoff matrices, we set up the game by creating the appropriate payoff matrix using the ```PayoffMatrix``` class.
```
# initiate the competitive matching pennies game
penny = ts.PayoffMatrix(name="penny_competitive")
# print the payoff matrix
print(penny)
```
The Matching Pennies game is a zero-sum game, meaning that for one agent to get a reward, the opponent has to lose. Agents thus have to predict their opponents' behavior, which is ideal for investigating ToM. Note that to explore other payoff matrices included in the package, or to learn how to specify a custom payoff matrix, the user can type the `help(ts.PayoffMatrix)` command.
Then we create the first of the two competing agents:
```
# define the random bias agent, which chooses 1 70 percent of the time, and call the agent "jung"
jung = ts.RB(bias=0.7)
# Examine Agent
print(f"jung is a class of type: {type(jung)}")
if isinstance(jung, ts.Agent):
print(f"but jung is also an instance of the parent class ts.Agent")
# let us have Jung make a choice
choice = jung.compete()
print(f"jung chose {choice} and its probability for choosing 1 was {jung.get_bias()}.")
```
Note that it is possible to create one or more agents simultaneously using the convenience function `create_agents()` and passing any starting parameters to it in the form of a dictionary.
```
# create a reinforcement learning agent
skinner = ts.create_agents(agents="QL", start_params={"save_history": True})
```
Now that both agents are created, we have them play against each other.
```
# have the agents compete for 30 rounds
results = ts.compete(jung, skinner, p_matrix=penny, n_rounds=30)
# examine results
print(results.head()) # inspect the first 5 rows of the dataframe
```
**Note:** you can remove the `print()` to get a nicer printout of the dataframe.
```
results.head() # inspect the first 5 rows of the dataframe
```
The data frame stores the choice of each agent as well as their resulting payoff. Simply summing the payoff columns would determine the winner.
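For example, the winner of the 30 rounds could be determined as follows; this assumes the payoff columns are named `payoff_agent0` and `payoff_agent1` (adjust the names if your tomsup version labels them differently):
```
# sum the payoffs over all rounds; the agent with the larger total wins
totals = results[["payoff_agent0", "payoff_agent1"]].sum()
print(totals)
winner = "jung (agent 0)" if totals["payoff_agent0"] > totals["payoff_agent1"] else "skinner (agent 1)"
print(f"The winner is {winner}.")
```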
## k-ToM
Here we will present some simple examples of the k-ToM agent. For a more in-depth description we recommend checking the expanded introduction on the [Github repository](https://github.com/KennethEnevoldsen/tomsup/blob/master/tutorials/introduction_to_tom.ipynb).
We will start off by creating a 1-ToM with default priors and `save_history=True`, to examine its inner workings. Notice that `save_history` is turned off by default to save memory, as memory use is especially problematic for ToM agents with a high sophistication level.
```
# Creating a simple 1-ToM with default parameters
tom_1 = ts.TOM(level=1, dilution=None, save_history=True)
# Extract the parameters
tom_1.print_parameters()
```
Note that k-ToM agents by default use agnostic starting beliefs. These can be shown in detail and specified as desired, as shown in the **appendix of the paper**.
To increase the agent's tendency to choose 1, we could simply increase its bias. Similarly, if we want the agent to behave in a more deterministic fashion, we can decrease its behavioural temperature. When the parameter values are set, we can play the agent against an opponent using the `.compete()` method, where `agent` denotes the agent's position in the payoff matrix (0 or 1) and `op_choice` denotes the choice of the opponent during the previous round.
```
tom_2 = ts.TOM(
level=2,
volatility=-2,
b_temp=-2, # more deterministic
bias=0,
dilution=None,
save_history=True,
)
choice = tom_2.compete(p_matrix=penny, agent=0, op_choice=None)
print("tom_2 chose:", choice)
```
For simplicity, the user is recommended to have the 1-ToM and the 2-ToM agents compete using the previously presented `ts.compete()` function. However, to make the process more transparent, in the following we write out a simple for-loop:
```
tom_2.reset() # reset before start
prev_choice_1tom = None
prev_choice_2tom = None
for trial in range(1, 4):
# note that op_choice is choice on previous turn
# and that agent is the agent you respond to in the payoff matrix
choice_1 = tom_1.compete(p_matrix=penny, agent=0, op_choice=prev_choice_1tom)
choice_2 = tom_2.compete(p_matrix=penny, agent=1, op_choice=prev_choice_2tom)
# update previous choice
prev_choice_1tom = choice_1
prev_choice_2tom = choice_2
print(
f"Round {trial}",
f" 1-ToM choose {choice_1}",
f" 2-ToM choose {choice_2}",
sep="\n",
)
```
A for-loop like this can be used to implement k-ToM in an experimental setting by replacing one of the agents with the behavior of a participant. Examples of such implementations (interfacing with PsychoPy) are available in the [documentation](https://github.com/KennethEnevoldsen/tomsup/tree/master/tutorials/psychopy_experiment).
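As a rough, hypothetical sketch of that idea, the loop below replaces one agent with keyboard input from a participant; a real experiment would collect responses through PsychoPy (or similar) rather than `input()`:
```
tom_2.reset()
prev_participant_choice = None
for trial in range(1, 4):
    # the participant plays the role of agent 0 and types 0 or 1 (no input validation here)
    participant_choice = int(input(f"Round {trial} - choose 0 or 1: "))
    # the k-ToM agent responds given the participant's choice from the previous round
    tom_choice = tom_2.compete(p_matrix=penny, agent=1, op_choice=prev_participant_choice)
    prev_participant_choice = participant_choice
    print(f"Round {trial}: participant chose {participant_choice}, 2-ToM chose {tom_choice}")
```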
```
tom_2.print_internal(
keys=["p_k", "p_op"], level=[0, 1] # print these two states
) # for the agent simulated opponents 0-ToM and 1-ToM
```
For instance, we can note that the estimate of the opponent's sophistication level (`p_k`) slightly favors a 1-ToM as opposed to a 0-ToM, and that the average probability of the opponent choosing 1 (`p_op`) slightly favors 1 (which was indeed the option the opponent chose). These estimates are quite uncertain due to the few rounds played. More information on how to interpret the internal states of the ToM agent is available in the documentation of the package, e.g. by using the help function `help(tom_2.print_internal)`.
## Multiple Agents and Visualizing Results
The above syntax is useful for small setups. However, the user might want to build larger simulations involving several agents, either to simulate data for an experimental setup or to test underlying assumptions. The package provides syntax for quickly iterating over multiple agents, rounds and even simulations. Here we show a quick example along with how to visualize the results and internal states of ToM agents.
```
# Create a list of agents
agents = ["RB", "QL", "WSLS", "1-TOM", "2-TOM"]
# And set their starting parameters. An empty dictionary denotes default values
start_params = [{"bias": 0.7}, {"learning_rate": 0.5}, {}, {}, {}]
group = ts.create_agents(agents, start_params) # create a group of agents
# Specify the environment
# round_robin e.g. each agent will play against all other agents
group.set_env(env="round_robin")
# Finally, we make the group compete 20 simulations of 30 rounds
results = group.compete(p_matrix=penny, n_rounds=30, n_sim=20, save_history=True)
```
Following the simulation, a data frame can be extracted as before, with additional columns reporting simulation number, competing agent pair (`agent0` and `agent1`) and if `save_history=True` it will also add two columns denoting the internal states of each agent, e.g. estimates and expectations at each trial.
```
res = group.get_results()
print(res.head(1)) # print the first row
```
**Again, removing the print statement gives you a more readable output**
```
res.head(1)
```
**To allow other authors to examine these results, we also save them to a newline-delimited .ndjson file.**
```
res.to_json("tutorials/paper.ndjson", orient="records", lines=True)
```
The package also provides convenient functions for plotting the agent's choices and performance.
> for nicer plots we will increase the figure size using the following code. This is excluded from the paper for simplicity
```
import matplotlib.pyplot as plt
# Set figure size
plt.rcParams["figure.figsize"] = [10, 10]
# plot a heatmap of the rewards for all agent in the tournament
group.plot_heatmap(cmap="RdBu_r")
plt.rcParams["figure.figsize"] = [5, 5]
# plot the choices of the 1-ToM agent when competing against the WSLS agent
group.plot_choice(agent0="WSLS", agent1="1-TOM", agent=1)
# plot the choices of the 1-ToM agent when competing against the RB agent
group.plot_choice(agent0="RB", agent1="1-TOM", agent=1)
# plot the score of the 1-ToM agent when competing against the WSLS agent
group.plot_score(agent0="WSLS", agent1="1-TOM", agent=1)
# plot the score of the 2-ToM agent when competing against the WSLS agent
group.plot_score(agent0="WSLS", agent1="2-TOM", agent=1)
```
As seen in the heatmap, the k-ToM models compare favorably against simpler agents such as the QL agent. Furthermore, notice that the 1-ToM and 2-ToM compare especially favorably against the WSLS agent, as this agent acts as a deterministic 0-ToM. We also see that the 2-ToM agent incurs a cost for its added complexity, being less able to take advantage of the deterministic nature of WSLS. We can examine this further in the figures, where we see that the 1-ToM is almost perfectly able to predict the behaviour of the WSLS agent after about turn 5 across simulations, while the 2-ToM takes longer to estimate the behaviour. The figures also show that the 1-ToM displays different behavioural patterns depending on its opponent: when playing against an RB agent it shows a bias-estimation behaviour, while when playing against the WSLS it shows an oscillating choice pattern. Ultimately these plots are meant for initial investigation, and more elaborate plots can be constructed from the results data frame.
> here we just refer to the figures, for more exact references please see the paper
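As one example of such a custom plot, the sketch below draws the cumulative mean payoff of the 2-ToM against the WSLS agent directly from the results data frame. It assumes the columns are named `round`, `agent0`, `agent1` and `payoff_agent1`; adjust if your version differs:
```
import matplotlib.pyplot as plt

# select the WSLS vs 2-TOM pairing and average the 2-ToM's payoff per round over simulations
pair = res[(res["agent0"] == "WSLS") & (res["agent1"] == "2-TOM")]
mean_payoff = pair.groupby("round")["payoff_agent1"].mean()

plt.plot(mean_payoff.index, mean_payoff.cumsum())
plt.xlabel("round")
plt.ylabel("cumulative mean payoff of 2-ToM")
plt.title("2-ToM vs WSLS (custom plot built from the results data frame)")
plt.show()
```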
Besides these general plots, the package also contains a series of shortcuts for plotting $k$-ToM's internal states, such as its estimate of its opponent's sophistication level. Here it is seen that the 2-ToM correctly estimates its opponent as having a sophistication level of 1 on average.
```
# plot 2-ToM estimate of its opponent sophistication level
group.plot_p_k(agent0="1-TOM", agent1="2-TOM", agent=1, level=0)
group.plot_p_k(agent0="1-TOM", agent1="2-TOM", agent=1, level=1)
```
It is also easy to plot k-ToM's estimates of its opponent's model parameters. As an example, the following code plots the 2-ToM's estimate of the 1-ToM's volatility and bias. We see that the 2-ToM agent approaches a correct estimate of the default volatility of -2 and correctly estimates its opponent as having no inherent bias.
```
# plot 2-ToM estimate of its opponent's volatility while believing the opponent to be level 1.
group.plot_tom_op_estimate(
agent0="1-TOM", agent1="2-TOM", agent=1, estimate="volatility", level=1, plot="mean"
)
# plot 2-ToM estimate of its opponent's bias while believing the opponent to be level 1.
group.plot_tom_op_estimate(
agent0="1-TOM", agent1="2-TOM", agent=1, estimate="bias", level=1, plot="mean"
)
```
Use `help(ts.AgentGroup.plot_tom_op_estimate)` for information on how to plot the other estimated parameters or k-ToM's uncertainty in these parameters.
Additional information can be found in the history column of the results data frame, if needed. This includes all of k-ToM's internal states (the changing variables in the model), such as choice probability, gradient and estimate uncertainties, as well as k-ToM's estimates of its opponent's internal states. Documentation, examples and further tutorials can be found on the GitHub repository, which also includes a more in-depth description of the dynamics of **the k-ToM model implementation**.
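A small hedged sketch for locating and inspecting those history columns without relying on their exact names:
```
# find the columns that store the agents' internal states (present because save_history=True)
history_cols = [col for col in res.columns if "history" in col.lower()]
print(history_cols)

# peek at the stored internal states of the first agent on the first row, if any were found
if history_cols:
    print(res.iloc[0][history_cols[0]])
```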
---
## Are you left with any questions?
Feel free to open a GitHub issue with questions and/or bug reports.
Best,
*Enevoldsen and Waade*
# Building the dataset
In this notebook, I'm going to be working with three datasets to create the dataset that the chatbot will be trained on.
```
import pandas as pd
files_path = 'D:/Sarcastic Chatbot/Input/'
```
# First dataset
**The Wordball Joke Dataset**, [link](https://www.kaggle.com/bfinan/jokes-question-and-answer/).
This dataset consists of three files, namely:
1. <i>qajokes1.1.2.csv</i>: with <i>75,114</i> pairs.
2. <i>t_lightbulbs.csv</i>: with <i>2,640</i> pairs.
3. <i>t_nosubject.csv</i>: with <i>32,120</i> pairs.
However, I'm not going to incorporate <i>t_lightbulbs.csv</i> in my dataset because I don't want that many examples of one topic. Besides, all the examples are similar in structure (they all start with <i>how many</i>).
Read the data files into pandas dataframes:
```
wordball_qajokes = pd.read_csv(files_path + 'qajokes1.1.2.csv', usecols=['Question', 'Answer'])
wordball_nosubj = pd.read_csv(files_path + 't_nosubject.csv', usecols=['Question', 'Answer'])
print(len(wordball_qajokes))
print(len(wordball_nosubj))
wordball_qajokes.head()
wordball_nosubj.head()
```
Concatenate both dataframes into one:
```
wordball = pd.concat([wordball_qajokes, wordball_nosubj], ignore_index=True)
wordball.head()
print(f"Number of question-answer pairs in the Wordball dataset: {len(wordball)}")
```
## Text Preprocessing
It turns out that not all cells are of type string. So, we can just apply the *str* function to make sure that all of them are of the same desired type.
```
wordball = wordball.applymap(str)
```
Let's look at the characters used in this dataset:
```
def distinct_chars(data, cols):
"""
This method takes in a pandas dataframe and prints all distinct characters.
data: a pandas dataframe.
cols: a Python list, representing names of columns for questions and answers. First item of the list should be the name
of the questions column and the second item should be the name of the column corresponding to answers.
"""
if cols is None:
cols = list(data.columns)
# join all questions into one string
questions = ' '.join(data[cols[0]])
# join all answers into one string
answers = ' '.join(data[cols[1]])
# get distinct characters used in the data (all questions and answers)
dis_chars = set(questions+answers)
# print the distinct characters that are used in the data
print(f"Number of distinct characters used in the dataset: {len(dis_chars)}")
# print(dis_chars)
dis_chars = list(dis_chars)
# Now let's print those characters in an organized way
digits = [char for char in dis_chars if char.isdigit()]
alphabets = [char for char in dis_chars if char.isalpha()]
special = [char for char in dis_chars if not (char.isdigit() | char.isalpha())]
# sort them to make them easier to read
digits = sorted(digits)
alphabets = sorted(alphabets)
special = sorted(special)
print(f"Digits: {digits}")
print(f"Alphabets: {alphabets}")
print(f"Special characters: {special}")
distinct_chars(wordball, ['Question', 'Answer'])
```
The following function replaces some characters with others, removes unwanted characters and gets rid of extra whitespaces from the data.
```
def clean_text(text):
"""
This method takes a string, applies different text preprocessing (characters replacement, removal of unwanted characters,
removal of extra whitespaces) operations and returns a string.
text: a string.
"""
import re
text = str(text)
# REPLACEMENT
# replace " with ' (because they basically mean the same thing)
# text = text.replace('\"','\'')
text = re.sub('\"', '\'', text)
# replace “ and ” with '
# text = text.replace("“",'\'').replace("”",'\'')
text = re.sub("“", '\'', text)
text = re.sub("”", '\'', text)
# replace ’ with '
# text = text.replace('’','\'')
text = re.sub('’', '\'', text)
# replace [] and {} with ()
#text = text.replace('[','(').replace(']',')').replace('{','(').replace('}',')')
text = re.sub('\[','(', text)
text = re.sub('\]',')', text)
text = re.sub('\{','(', text)
text = re.sub('\}',')', text)
# replace ? with itself and a whitespace preceding it
# ex. what's your name? (we want the word name and question mark to be separate tokens)
# text = re.sub('\?', ' ?', text)
# creating a space between a word and the punctuation following it
# punctuation we're using: . , : ; ' ? ! + - * / = % $ @ & ( )
text = re.sub("([?.!,:;'?!+\-*/=%$@&()])", r" \1 ", text)
# REMOVAL OF UNWANTED CHARACTERS
# accept only alphanumeric and some special characters and remove all others
# a-zA-Z0-9 : matches any alphanumeric character and the underscore.
# \. : matches .
# \, : matches ,
# \: : matches :
# \; : matches ;
# \' : matches '
# \? : matches ?
# \! : matches !
# \+ : matches +
# \- : matches -
# \* : matches *
# \/ : matches /
# \= : matches =
# \% : matches %
# \$ : matches $
# \@ : matches @
# \& : matches &
# ^ is added to the beginning of the set to express that we want the regex to recognize all other characters except
# these that are explicitly specified, so that we can omit them.
# define the pattern
pattern = re.compile('[^a-zA-Z0-9_\.\,\:\;\'\?\!\+\-\*\/\=\%\$\@\&\(\)]')
# remove unwanted characters
text = re.sub(pattern, ' ', text)
# lower case the characters in the string
text = text.lower()
# REMOVAL OF EXTRA WHITESPACES
# remove duplicated spaces
text = re.sub(' +', ' ', text)
# remove leading and trailing spaces
text = text.strip()
return text
```
Let's try it out:
```
clean_text("A nice quote I read today: “Everything that you are going through is preparing you for what you asked for”. @hi % & =+-*/")
```
The following method prints a question-answer pair from the dataset, it will be helpful to give us a sense of what the *clean_text* function results in:
```
def print_question_answer(df, index, cols):
print(f"Question: ({index})")
print(df.loc[index][cols[0]])
print(f"Answer: ({index})")
print(df.loc[index][cols[1]])
print("Before applying text preprocessing:")
print_question_answer(wordball, 102, ['Question', 'Answer'])
print_question_answer(wordball, 200, ['Question', 'Answer'])
print_question_answer(wordball, 88376, ['Question', 'Answer'])
print_question_answer(wordball, 94351, ['Question', 'Answer'])
```
Apply text preprocessing (characters replacement, removal of unwanted characters, removal of extra whitespaces):
```
wordball = wordball.applymap(clean_text)
print("After applying text preprocessing:")
print_question_answer(wordball, 102, ['Question', 'Answer'])
print_question_answer(wordball, 200, ['Question', 'Answer'])
print_question_answer(wordball, 88376, ['Question', 'Answer'])
print_question_answer(wordball, 94351, ['Question', 'Answer'])
```
The following function applies some preprocessing operations on the data, concretely:
1. Drops unnecessary duplicate pairs (rows) but keeps only one instance of all duplicates. *(For example, if the dataset contains three duplicates of the same question-answer pair, then two of them would be removed and one kept.)*
2. Drops rows with an empty question/answer. *(These may appear because of the previous step or because they happen to be empty in the original dataset.)*
3. Drops rows with more than 30 words in either the question or the answer, or if the answer has less than two characters. *(Note: this is a hyperparameter and you can try other values.)*
```
def preprocess_data(data, cols):
"""
    This method preprocesses the data and does the following:
    1. drops unnecessary duplicate pairs.
    2. drops rows with empty strings.
    3. drops rows with more than 30 words in either the question or the answer,
    or if an answer has less than two characters.
Arguments:
data: a pandas dataframe.
cols: a Python list, representing names of columns for questions and answers. First item of the list should be the name
of the questions column and the second item should be the name of the column corresponding to answers.
Returns:
a pandas dataframe.
"""
    # (1) Remove unnecessary duplicate pairs but keep only one instance of all duplicates.
    print('Removing unnecessary duplicate pairs:')
data_len_before = len(data) # len of data before removing duplicates
print(f"# of examples before removing duplicates: {data_len_before}")
# drop duplicates
data = data.drop_duplicates(keep='first')
data_len_after = len(data) # len of data after removing duplicates
print(f"# of examples after removing duplicates: {data_len_after}")
print(f"# of removed duplicates: {data_len_before-data_len_after}")
# (2) Drop rows with empty strings.
print('Removing empty string rows:')
if cols is None:
cols = list(data.columns)
data_len_before = len(data) # len of data before removing empty strings
print(f"# of examples before removing rows with empty question/answers: {data_len_before}")
# I am going to use boolean masking to filter out rows with an empty question or answer
data = data[(data[cols[0]] != '') & (data[cols[1]] != '')]
# also, the following row results in the same as the above.
# data = data.query('Answer != "" and Question != ""')
data_len_after = len(data) # len of data after removing empty strings
print(f"# of examples after removing with empty question/answers: {data_len_after}")
print(f"# of removed empty string rows: {data_len_before-data_len_after}")
# (3) Drop rows with more than 30 words in either the question or the answer
# or if the an answer has less than two characters.
def accepted_length(qa_pair):
q_len = len(qa_pair[0].split(' '))
a_len = len(qa_pair[1].split(' '))
if (q_len <= 30) & ((a_len <= 30) & (len(qa_pair[1]) > 1)):
return True
return False
print('Removing rows with more than 30 words in either the question or the answer:')
data_len_before = len(data) # len of data before dropping those rows (30+ words)
print(f"# of examples before removing rows with more than 30 words: {data_len_before}")
# filter out rows with more than 30 words
accepted_mask = data.apply(accepted_length, axis=1)
data = data[accepted_mask]
    data_len_after = len(data) # len of data after dropping those rows (30+ words)
    print(f"# of examples after removing rows with more than 30 words: {data_len_after}")
    print(f"# of removed rows with more than 30 words: {data_len_before-data_len_after}")
print("Data preprocessing is done.")
return data
wordball = preprocess_data(wordball, ['Question', 'Answer'])
print(f"# of question-answer pairs we have left in the Wordball dataset: {len(wordball)}")
```
Let's look at the characters after cleaning the data:
```
distinct_chars(wordball, ['Question', 'Answer'])
```
# Second Dataset
**reddit /r/Jokes**, [here](https://www.kaggle.com/cuddlefish/reddit-rjokes#jokes_score_name_clean.csv).
This dataset consists of two files, namely:
1. <i>jokes_score_name_clean.csv</i>: with <i>133,992</i> pairs.
2. <i>all_jokes.csv</i>
However, I'm not going to incorporate <i>all_jokes.csv</i> in the dataset because it's so messy.
```
reddit_jokes = pd.read_csv(files_path + 'jokes_score_name_clean.csv', usecols=['q', 'a'])
```
Let's rename the columns to have them aligned with the previous dataset:
```
reddit_jokes.rename(columns={'q':'Question', 'a':'Answer'}, inplace=True)
reddit_jokes.head()
print(len(reddit_jokes))
distinct_chars(reddit_jokes, ['Question', 'Answer'])
```
## Text Preprocessing
```
reddit_jokes = reddit_jokes.applymap(str)
```
Reddit data has some special tags like <i>[removed]</i> or <i>[deleted]</i> (these two mean that the comment has been removed/deleted). Also, they're written in an inconsistent way, i.e. you may find the tag <i>[removed]</i> capitalized or lowercased.<br>
The next function will address reddit tags as follows:
1. Drops rows with deleted, removed or censored tags.
2. Replaces other tags found in text with a whitespace. *(i.e. some comments have tags like <i>[censored], [gaming], [long], [request] and [dirty]</i> and we want to omit these tags from the text)*
```
def clean_reddit_tags(data, cols):
"""
This function removes reddit-related tags from the data and does the following:
1. drops rows with deleted, removed or censored tags.
2. replaces other tags found in text with a whitespace.
Arguments:
data: a pandas dataframe.
cols: a Python list, representing names of columns for questions and answers. First item of the list should be the name
of the questions column and the second item should be the name of the column corresponding to answers.
Returns:
a pandas dataframe.
"""
import re
if cols is None:
cols = list(data.columns)
# First, I'm going to lowercase all the text to address these tags
# however, I'm not going to alter the original dataframe because I don't want text to be lowercased.
data_copy = data.copy()
data_copy[cols[0]] = data_copy[cols[0]].str.lower()
data_copy[cols[1]] = data_copy[cols[1]].str.lower()
# drop rows with deleted, removed or censored tags.
# qa_pair[0] is the question, qa_pair[1] is the answer
mask = data_copy.apply(lambda qa_pair:
False if (qa_pair[0]=='[removed]') | (qa_pair[0]=='[deleted]') | (qa_pair[0]=='[censored]') |
(qa_pair[1]=='[removed]') | (qa_pair[1]=='[deleted]') | (qa_pair[1]=='[censored]')
else True, axis=1)
# drop the rows, notice we're using the mask to filter out those rows
# in the original dataframe 'data', because we don't need it anymore
data = data[mask]
print(f"# of rows dropped with [deleted], [removed] or [censored] tags: {mask.sum()}")
# replaces other tags found in text with a whitespace.
def sub_tag(pair):
"""
This method substitute tags (square brackets with words inside) with whitespace.
Arguments:
pair: a Pandas Series, where the first item is the question and the second is the answer.
Returns:
pair: a Pandas Series.
"""
# \[(.*?)\] is a regex to recognize square brackets [] with anything in between
p=re.compile("\[(.*?)\]")
pair[0] = re.sub(p, ' ', pair[0])
pair[1] = re.sub(p, ' ', pair[1])
return pair
# substitute tags with whitespaces.
data = data.apply(sub_tag, axis=1)
return data
print("Before addressing tags:")
print_question_answer(reddit_jokes, 1825, ['Question', 'Answer'])
print_question_answer(reddit_jokes, 52906, ['Question', 'Answer'])
print_question_answer(reddit_jokes, 59924, ['Question', 'Answer'])
print_question_answer(reddit_jokes, 1489, ['Question', 'Answer'])
```
**Note:** the following cell may take multiple seconds to finish.
```
reddit_jokes = clean_reddit_tags(reddit_jokes, ['Question', 'Answer'])
reddit_jokes
print("After addressing tags:")
# because rows with [removed], [deleted] and [censored] tags have been dropped
# we're not going to print the rows (index=1825, index=59924) since they contain
# those tags, or we're going to have a KeyError
print_question_answer(reddit_jokes, 52906, ['Question', 'Answer'])
print_question_answer(reddit_jokes, 1489, ['Question', 'Answer'])
```
**Note:** notice that the question whose index is 52906 has some leading whitespace. That's because it had the <i>[Corny]</i> tag and the function replaced it with whitespace. Also, the question whose index is 1489 has an empty answer because the original answer was just square brackets with some whitespace in between. We're going to address all of that next!
Now, let's apply the *clean_text* function on the reddit data.<br>
**Remember:** the *clean_text* function replaces some characters with others, removes unwanted characters and gets rid of extra whitespaces from the data.
```
reddit_jokes = reddit_jokes.applymap(clean_text)
print_question_answer(reddit_jokes, 52906, ['Question', 'Answer'])
print_question_answer(reddit_jokes, 1489, ['Question', 'Answer'])
```
Everything looks good!<br>
Now, let's apply the *preprocess_data* function on the data.<br>
**Remember:** the *preprocess_data* function applies the following preprocessing operations:
1. Drops unnecessary duplicate pairs (rows) but keeps only one instance of all duplicates. *(For example, if the dataset contains three duplicates of the same question-answer pair, then two of them would be removed and one kept.)*
2. Drops rows with an empty question/answer. *(These may appear because of the previous step or because they happen to be empty in the original dataset.)*
3. Drops rows with more than 30 words in either the question or the answer, or if the answer has less than two characters. *(Note: this is a hyperparameter and you can try other values.)*
```
reddit_jokes = preprocess_data(reddit_jokes, ['Question', 'Answer'])
print(f"Number of question answer pairs in the reddit /r/Jokes dataset: {len(reddit_jokes)}")
distinct_chars(reddit_jokes, ['Question', 'Answer'])
```
# Third Dataset
**Question-Answer Jokes**, [here](https://www.kaggle.com/jiriroz/qa-jokes).
This dataset consists of one file, namely:
* <i>jokes.csv</i>: with <i>38,269</i> pairs.
```
qa_jokes = pd.read_csv(files_path + 'jokes.csv', usecols=['Question', 'Answer'])
qa_jokes
print(len(qa_jokes))
distinct_chars(qa_jokes, ['Question', 'Answer'])
```
## Text Preprocessing
If you look at some examples in the dataset, you notice that some of them have 'Q:' at the beginning of the question and 'A:' at the beginning of the answer, so we need to get rid of these prefixes because they don't convey useful information.<br>
You also notice some examples where both 'Q:' and 'A:' are found in either the question or the answer; I'm not going to omit these because they probably convey information and are part of the joke. However, some of them have 'Q:' in the question and 'Q: question A: answer' in the answer, where the question repeated in the answer is the same question, so we need to fix that.
```
def clean_qa_prefixes(data, cols):
"""
This function removes special prefixes ('Q:' and 'A:') found in the data.
i.e. input="Q: how's your day?" --> output=" how's your day?"
Arguments:
data: a pandas dataframe.
cols: a Python list, representing names of columns for questions and answers. First item of the list should be the name
of the questions column and the second item should be the name of the column corresponding to answers.
Returns:
a pandas dataframe.
"""
def removes_prefixes(pair):
"""
This function removes prefixes ('Q:' and 'A:') from the question and answer.
Examples:
Input: qusetion="Q: what is your favorite Space movie?", answer='A: Interstellar!'
Output: qusetion=' what is your favorite Space movie?', answer=' Interstellar!'
Input: question="Q: how\'s your day?", answer='Q: how\'s your day? A: good, thanks.'
Output: qusetion=" how's your day?", answer='good, thanks.'
Input: qusetion='How old are you?', answer='old enough'
Output: qusetion='How old are you?', answer='old enough'
Arguments:
pair: a Pandas Series, where the first item is the question and the second is the answer.
Returns:
pair: a Pandas Series.
"""
# pair[0] corresponds to the question
# pair[1] corresponds to the answer
# if the question contains 'Q:' and the answer contains 'A:' but doesn't contain 'Q:'
if ('Q:' in pair[0]) and ('A:' in pair[1]) and ('Q:' not in pair[1]):
pair[0] = pair[0].replace('Q:','')
pair[1] = pair[1].replace('A:','')
# if the answer contains both 'Q:' and 'A:'
elif ('A:' in pair[1]) and ('Q:' in pair[1]):
pair[0] = pair[0].replace('Q:','')
# now we should check if the text between 'Q:' and 'A:' is the same text in the question (pair[0])
# because if they are, this means that the question is repeated in the answer and we should address that.
q_start = pair[1].find('Q:') + 2 # index of the start of the text that we want to extract
q_end = pair[1].find('A:') # index of the end of the text that we want to extract
q_txt = pair[1][q_start:q_end].strip()
# if the question is repeated in the answer
if q_txt == pair[0].strip():
# in case the question is repeated in the answer, removes it from the answer
pair[1] = pair[1][q_end+2:].strip()
return pair
return data.apply(removes_prefixes, axis=1)
print("Before removing unnecessary prefixes:")
print_question_answer(qa_jokes, 44, ['Question', 'Answer'])
print_question_answer(qa_jokes, 22, ['Question', 'Answer'])
print_question_answer(qa_jokes, 31867, ['Question', 'Answer'])
qa_jokes = clean_qa_prefixes(qa_jokes, ['Question', 'Answer'])
print("After removing unnecessary prefixes:")
print_question_answer(qa_jokes, 44, ['Question', 'Answer'])
print_question_answer(qa_jokes, 22, ['Question', 'Answer'])
print_question_answer(qa_jokes, 31867, ['Question', 'Answer'])
```
Notice that in the third example both 'Q:' and 'A:' are part of the answer and convey information.
Now, let's apply the *clean_text* function on the Question-Answer Jokes data.<br>
**Remember:** the *clean_text* function replaces some characters with others, removes unwanted characters and gets rid of extra whitespaces from the data.
```
qa_jokes = qa_jokes.applymap(clean_text)
```
Now, let's apply the *preprocess_data* function on the data.<br>
**Remember:** the *preprocess_data* function applies the following preprocessing operations:
1. Drops unnecessary duplicate pairs (rows) but keeps only one instance of all duplicates. *(For example, if the dataset contains three duplicates of the same question-answer pair, then two of them would be removed and one kept.)*
2. Drops rows with an empty question/answer. *(These may appear because of the previous step or because they happen to be empty in the original dataset.)*
3. Drops rows with more than 30 words in either the question or the answer, or if the answer has less than two characters. *(Note: this is a hyperparameter and you can try other values.)*
```
qa_jokes = preprocess_data(qa_jokes, ['Question', 'Answer'])
print(f"Number of question-answer pairs in the Question-Answer Jokes dataset: {len(qa_jokes)}")
distinct_chars(qa_jokes, ['Question', 'Answer'])
```
# Putting it together
Let's concatenate all the data we have to create our final dataset.
```
dataset = pd.concat([wordball, reddit_jokes, qa_jokes], ignore_index=True)
dataset.head()
print(f"Number of question-answer pairs in the dataset: {len(dataset)}")
```
There may be duplicate examples in the data so let's drop them:
```
data_len_before = len(dataset) # len of data before removing duplicates
print(f"# of examples before removing duplicates: {data_len_before}")
# drop duplicates
dataset = dataset.drop_duplicates(keep='first')
data_len_after = len(dataset) # len of data after removing duplicates
print(f"# of examples after removing duplicates: {data_len_after}")
print(f"# of removed duplicates: {data_len_before-data_len_after}")
```
Let's drop rows with NaN values if there's any:
```
dataset.dropna(inplace=True)
dataset
```
Let's make sure that all our cells are of the same type:
```
dataset = dataset.applymap(str)
print(f"Number of question-answer pairs in the dataset: {len(dataset)}")
distinct_chars(dataset, ['Question', 'Answer'])
```
Finally, let's save the dataset:
```
dataset.to_csv(files_path + '/dataset.csv')
```
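As a quick sanity check (optional), the saved file can be read back and its size compared against the in-memory data frame:
```
# reload the saved dataset and confirm the number of question-answer pairs matches
reloaded = pd.read_csv(files_path + '/dataset.csv', index_col=0)
print(f"Reloaded {len(reloaded)} question-answer pairs (expected {len(dataset)}).")
```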

# Python for Data Professionals
## 02 Programming Basics
<p style="border-bottom: 1px solid lightgrey;"></p>
<dl>
<dt>Course Outline</dt>
<dt>1 - Overview and Course Setup</dt>
<dt>2 - Programming Basics <i>(This section)</i></dt>
<dd>2.1 - Getting help</dd>
<dd>2.2 Code Syntax and Structure</dd>
<dd>2.3 Variables</dd>
<dd>2.4 Operations and Functions</dd>
<dt>3 Working with Data</dt>
<dt>4 Deployment and Environments</dt>
</dl>
<p style="border-bottom: 1px solid lightgrey;"></p>
## Programming Basics Overview
From here on out, you'll focus on using Python in programming mode - you'll write code that you run from an IDE or a calling environment, not interactively from the command-line. As you work through this explanation, copy the code you see and run it to see the results. After you work through these copy-and-paste examples, you'll create your own code in the Activities that follow each section.
<p><img style="float: left; margin: 0px 15px 15px 0px;" src="../graphics/cortanalogo.png"><b>2.1 - Getting help</b></p>
The very first thing you should learn in any language is how to get help. You can [find the help documents on-line](https://docs.python.org/3/index.html), or simply type
`help()`
in your code. For help on a specific topic, put the topic in the parenthesis:
`help(str)`
To see a list of topics, type
`help('topics')`
```
# Try it:
```
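One possible way to fill in the cell above (purely illustrative):
```
# ask Python to describe the built-in print function
help(print)

# list the available help topics (note the quotes around 'topics')
help('topics')
```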
<p><img style="float: left; margin: 0px 15px 15px 0px;" src="../graphics/cortanalogo.png"><b>2.2 Code Syntax and Structure</b></p>
Let's cover a few basics about how Python code is written. (For a full discussion, check out the [Style Guide for Python, called PEP 8](https://www.python.org/dev/peps/pep-0008/) ) Let's use the "Zen of Python" rules from Tim Peters for this course:
<pre>
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than right now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
--Tim Peters
</pre>
In general, use standard coding practices - don't use keywords for variables, be consistent in your naming (camel-case, lower-case, etc.), comment your code clearly, understand the general syntax of your language, and follow the principles above. But the most important tip is to at least read PEP 8 and decide for yourself how well it fits into your Zen.
There is one hard-and-fast rule for Python that you *do* need to be aware of: indentation. You **must** indent your code for classes, functions (or methods), loops, conditions, and lists. You can use a tab or four spaces (spaces are the accepted way to do it) but in any case, you have to be consistent. If you use tabs, you always use tabs. If you use spaces, you have to use that throughout. It's best if you set your IDE to handle that for you, whichever way you go.
Python code files have an extension of `.py`.
Comments in Python start with the hash-tag: `#`. There are no block comments (and this makes us all sad) so each line you want to comment must have a tag in front of that line. Keep the lines short (80 characters or so) so that they don't fall off a single-line display like at the command line.
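As a tiny illustration of these rules, the following cell uses consistent four-space indentation for a function, a loop and a condition, plus `#` comments:
```
# consistent four-space indentation marks each code block
def describe(numbers):
    for n in numbers:
        if n % 2 == 0:
            print(str(n) + " is even")
        else:
            print(str(n) + " is odd")

describe([1, 2, 3])
```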
<p><img style="float: left; margin: 0px 15px 15px 0px;" src="../graphics/checkbox.png"><b>2.3 Variables</b></p>
Variables stand in for replaceable values. Python is dynamically typed, meaning you can just declare a variable name and set it to a value at the same time, and Python will infer what data type you want. You use an `=` sign to assign values, and `==` to compare things.
Quotes \" or ticks \' are fine, just be consistent.
`# There are some keywords to be aware of, but x and y are always good choices.`
`x = "Buck" # I'm a string.`
`type(x)`
`y = 10 # I'm an integer.`
`type(y)`
To change the type of a value, just re-enter something else:
`x = "Buck" # I'm a string.`
`type(x)`
`x = 10 # Now I'm an integer.`
`type(x)`
Or cast it by explicitly declaring the conversion:
`x = "10"`
`type(x)`
`print(int(x))`
To concatenate string values, use the `+` sign:
`x = "Buck"`
`y = " Woody"`
`print(x + y)`
```
# Try it:
```
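If you want the snippets above as one runnable cell, something like this works (one possible way to fill in the 'Try it' cell):
```
# assignment and dynamic typing
x = "Buck"        # a string
print(type(x))
x = 10            # now an integer
print(type(x))

# explicit conversion (casting)
x = "10"
print(int(x))

# string concatenation
x = "Buck"
y = " Woody"
print(x + y)
```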
<p><img style="float: left; margin: 0px 15px 15px 0px;" src="../graphics/checkbox.png"><b>2.4 Operations and Functions</b></p>
Python has the following operators:
- Arithmetic Operators
- Comparison (Relational) Operators
- Assignment Operators
- Logical Operators
- Bitwise Operators
- Membership Operators
- Identity Operators
You have the standard operators and functions from most every language. Here are some of the tokens:
<pre>
!= *= << ^
" + <<= ^=
""" += <= `
% , <> __
%= - ==
& -= > b"
&= . >= b'
' ... >> j
''' / >>= r"
( // @ r'
) //= J |'
* /= [ |=
** : \ ~
**= < ]
</pre>
Wait...that's it? That's all you're going to tell me? *(Hint: use what you've learned):*
`help('symbols')`
Walk through each of these operators carefully - you'll use them when you work with data in the next module.
```
# Try it:
```
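A few of those operator families in action (again, one possible way to fill in the 'Try it' cell):
```
# arithmetic and assignment operators
a = 7
a += 3                      # a is now 10

# comparison and logical operators
print(a == 10 and a > 5)    # True

# bitwise operators
print(a & 2, a | 1, a << 1) # 2 11 20

# membership and identity operators
letters = ["a", "b", "c"]
print("a" in letters)       # True
same_list = letters
print(same_list is letters) # True - both names point at the same list object
```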
<p><img style="float: left; margin: 0px 15px 15px 0px;" src="../graphics/aml-logo.png"><b>Activity - Programming basics</b></p>
Open the **02_ProgrammingBasics.py** file and run the code you see there. The exercises will be marked out using comments:
`# <TODO> - Section Number`
```
# 02_ProgrammingBasics.py
# Purpose: General Programming exercises for Python
# Author: Buck Woody
# Credits and Sources: Inline
# Last Updated: 27 June 2018
# 2.1 Getting Help
help()
help(str)
# <TODO> - Write code to find help on help
# 2.2 Code Syntax and Structure
# <TODO> - Python uses spaces to indicate code blocks. Fix the code below:
x=10
y=5
if x > y:
print(str(x) + " is greater than " + str(y))
# <TODO> - Arguments on first line are forbidden when not using vertical alignment. Fix this code:
foo = long_function_name(var_one, var_two,
var_three, var_four)
# <TODO> operators sit far away from their operands. Fix this code:
income = (gross_wages +
taxable_interest +
(dividends - qualified_dividends) -
ira_deduction -
student_loan_interest)
# <TODO> - The import statement should use separate lines for each effort. You can fix the code below
# using separate lines or by using the "from" statement:
import sys, os
# <TODO> - The following code has extra spaces in the wrong places. Fix this code:
i=i+1
submitted +=1
x = x * 2 - 1
hypot2 = x * x + y * y
c = (a + b) * (a - b)
# 2.3 Variables
# <TODO> - Add a line below x=3 that changes the variable x from int to a string
x=3
type(x)
# <TODO> - Write code that prints the string "This class is awesome" using variables:
x="is awesome"
y="This Class"
# 2.4 Operations and Functions
# <TODO> - Use some basic operators to write the following code:
# Assign two variables
# Add them
# Subtract 20 from each, add those values together, save that to a new variable
# Create a new string variable with the text "The result of my operations are: "
# Print out a single string on the screen with the result of the variables
# showing that result.
# EOF: 02_ProgrammingBasics.py
```
<p><img style="float: left; margin: 0px 15px 15px 0px;" src="../graphics/thinking.jpg"><b>For Further Study</b></p>
- The PEP - https://www.python.org/dev/peps/pep-0008/
- Introduction to the Python Coding Style - http://stackabuse.com/introduction-to-the-python-coding-style/
- The Microsoft Tutorial and samples for Python - https://code.visualstudio.com/docs/languages/python
- Coding requirements and standards - PEP - https://www.python.org/dev/peps/pep-0008/
- Another free online self-paced course - https://www.w3schools.com/python/default.asp
Next, Continue to *03 Working with Data*
<a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_10_3_text_generation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# T81-558: Applications of Deep Neural Networks
**Module 10: Time Series in Keras**
* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# Module 10 Material
* Part 10.1: Time Series Data Encoding for Deep Learning [[Video]](https://www.youtube.com/watch?v=dMUmHsktl04&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_1_timeseries.ipynb)
* Part 10.2: Programming LSTM with Keras and TensorFlow [[Video]](https://www.youtube.com/watch?v=wY0dyFgNCgY&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_2_lstm.ipynb)
* **Part 10.3: Text Generation with Keras and TensorFlow** [[Video]](https://www.youtube.com/watch?v=6ORnRAz3gnA&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_3_text_generation.ipynb)
* Part 10.4: Image Captioning with Keras and TensorFlow [[Video]](https://www.youtube.com/watch?v=NmoW_AYWkb4&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_4_captioning.ipynb)
* Part 10.5: Temporal CNN in Keras and TensorFlow [[Video]](https://www.youtube.com/watch?v=i390g8acZwk&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_5_temporal_cnn.ipynb)
# Google CoLab Instructions
The following code ensures that Google CoLab is running the correct version of TensorFlow.
```
try:
%tensorflow_version 2.x
COLAB = True
print("Note: using Google CoLab")
except:
print("Note: not using Google CoLab")
COLAB = False
```
# Part 10.3: Text Generation with LSTM
Recurrent neural networks are also known for their ability to generate text. As a result, the output of the neural network can be free-form text. In this section, we will see how to train an LSTM can on a textual document, such as classic literature, and learn to output new text that appears to be of the same form as the training material. If you train your LSTM on [Shakespeare](https://en.wikipedia.org/wiki/William_Shakespeare), it will learn to crank out new prose similar to what Shakespeare had written.
Don't get your hopes up. You are not going to teach your deep neural network to write the next [Pulitzer Prize for Fiction](https://en.wikipedia.org/wiki/Pulitzer_Prize_for_Fiction). The prose generated by your neural network will be nonsensical. However, it will usually be nearly grammatically correct and of a similar style to the source training documents.
A neural network generating nonsensical text based on literature may not seem useful at first glance. However, this technology gets so much interest because it forms the foundation for many more advanced technologies. The fact that the LSTM will typically learn human grammar from the source document opens a wide range of possibilities. You can use similar technology to complete sentences when a user is entering text. The simple ability to output free-form text becomes the foundation of many other technologies. In the next part, we will use this technique to create a neural network that can write captions for images to describe what is going on in the picture.
### Additional Information
The following are some of the articles that I found useful in putting this section together.
* [The Unreasonable Effectiveness of Recurrent Neural Networks](http://karpathy.github.io/2015/05/21/rnn-effectiveness/)
* [Keras LSTM Generation Example](https://keras.io/examples/lstm_text_generation/)
### Character-Level Text Generation
There are several different approaches to teaching a neural network to output free-form text. The most basic question is if you wish the neural network to learn at the word or character level. In many ways, learning at the character level is the more interesting of the two. The LSTM is learning to construct its own words without even being shown what a word is. We will begin with character-level text generation. In the next module, we will see how we can use nearly the same technique to operate at the word level. We will implement word-level automatic captioning in the next module.
We begin by importing the needed Python packages and defining the sequence length, named **maxlen**. Time-series neural networks always accept their input as a fixed-length array. Because you might not use all of the sequence elements, it is common to fill extra elements with zeros. You will divide the text into sequences of this length, and the neural network will train to predict what comes after this sequence.
```
from tensorflow.keras.callbacks import LambdaCallback
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.utils import get_file
import numpy as np
import random
import sys
import io
import requests
import re
```
For this simple example, we will train the neural network on the classic children's book [Treasure Island](https://en.wikipedia.org/wiki/Treasure_Island). We begin by loading this text into a Python string and displaying the first 1,000 characters.
```
r = requests.get("https://data.heatonresearch.com/data/t81-558/text/"\
"treasure_island.txt")
raw_text = r.text
print(raw_text[0:1000])
```
We will extract all unique characters from the text and sort them. This technique allows us to assign a unique ID to each character. Because we sorted the characters, these IDs should remain the same. If we add new characters to the original text, then the IDs would change. We build two dictionaries. The first **char2idx** is used to convert a character into its ID. The second **idx2char** converts an ID back into its character.
```
processed_text = raw_text.lower()
processed_text = re.sub(r'[^\x00-\x7f]',r'', processed_text)
print('corpus length:', len(processed_text))
chars = sorted(list(set(processed_text)))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
```
We are now ready to build the actual sequences. Just like previous neural networks, there will be an $x$ and $y$. However, for the LSTM, $x$ and $y$ will both be sequences. The $x$ input will specify the sequences where $y$ are the expected output. The following code generates all possible sequences.
```
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(processed_text) - maxlen, step):
sentences.append(processed_text[i: i + maxlen])
next_chars.append(processed_text[i + maxlen])
print('nb sequences:', len(sentences))
sentences
print('Vectorization...')
x = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
x[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
x.shape
y.shape
```
The dummy variables for $y$ are shown below.
```
y[0:10]
```
Next, we create the neural network. This neural network's primary feature is the LSTM layer, which allows the sequences to be processed.
```
# build the model: a single LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars), activation='softmax'))
optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
model.summary()
```
The LSTM will produce new text character by character. We will need to sample the correct letter from the LSTM predictions each time. The **sample** function accepts the following two parameters:
* **preds** - The output neurons.
* **temperature** - 1.0 is the most conservative, 0.0 is the most confident (willing to make spelling and other errors).
The sample function below essentially performs a temperature-scaled softmax on the neural network predictions. This causes each output neuron to become a probability of its particular letter.
```
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
```
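To get a feel for the temperature parameter before training, we can call `sample` on a small, hand-made probability vector; lower temperatures almost always pick the most likely index, while higher temperatures spread the picks out (exact counts vary from run to run):
```
# toy distribution over three "characters"
toy_preds = np.array([0.1, 0.2, 0.7])

for temperature in [0.2, 1.0, 1.2]:
    picks = [sample(toy_preds, temperature) for _ in range(20)]
    print(f"temperature {temperature}: {picks}")
```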
Keras calls the following function at the end of each training epoch. The code generates sample text that visually demonstrates how the neural network gets better at text generation. As the neural network trains, the generated text should look more realistic.
```
def on_epoch_end(epoch, _):
# Function invoked at end of each epoch. Prints generated text.
print("******************************************************")
print('----- Generating text after Epoch: %d' % epoch)
start_index = random.randint(0, len(processed_text) - maxlen - 1)
for temperature in [0.2, 0.5, 1.0, 1.2]:
print('----- temperature:', temperature)
generated = ''
sentence = processed_text[start_index: start_index + maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for i in range(400):
x_pred = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x_pred[0, t, char_indices[char]] = 1.
preds = model.predict(x_pred, verbose=0)[0]
next_index = sample(preds, temperature)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
```
We are now ready to train. It can take up to an hour to train this network, depending on how fast your computer is. If you have a GPU available, please make sure to use it.
```
# Ignore useless W0819 warnings generated by TensorFlow 2.0. Hopefully can remove this ignore in the future.
# See https://github.com/tensorflow/tensorflow/issues/31308
import logging, os
logging.disable(logging.WARNING)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# Fit the model
print_callback = LambdaCallback(on_epoch_end=on_epoch_end)
model.fit(x, y,
batch_size=128,
epochs=60,
callbacks=[print_callback])
```
```
%matplotlib inline
import pandas as pd
from os.path import join
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import skbio
# from q2d2 import get_within_between_distances, filter_dm_and_map
from stats import mc_t_two_sample
from skbio.stats.distance import anosim, permanova
from skbio.stats.composition import ancom, multiplicative_replacement
import itertools
```
## Define a couple of helper functions
```
def get_within_between_distances(map_df, dm, col):
filtered_dm, filtered_map = filter_dm_and_map(dm, map_df)
groups = []
distances = []
map_dict = filtered_map[col].to_dict()
for id_1, id_2 in itertools.combinations(filtered_map.index.tolist(), 2):
row = []
if map_dict[id_1] == map_dict[id_2]:
groups.append('Within')
else:
groups.append('Between')
distances.append(filtered_dm[(id_1, id_2)])
groups = zip(groups, distances)
distances_df = pd.DataFrame(data=list(groups), columns=['Groups', 'Distance'])
return distances_df
def filter_dm_and_map(dm, map_df):
ids_to_exclude = set(dm.ids) - set(map_df.index.values)
ids_to_keep = set(dm.ids) - ids_to_exclude
filtered_dm = dm.filter(ids_to_keep)
filtered_map = map_df.loc[ids_to_keep]
return filtered_dm, filtered_map
colors = sns.color_palette("YlGnBu", 100)
sns.palplot(colors)
```
Load mapping file and munge it
-----------------
```
home = '/home/office-microbe-files'
map_fp = join(home, 'master_map_150908.txt')
sample_md = pd.read_csv(map_fp, sep='\t', index_col=0, dtype=str)
sample_md = sample_md[sample_md['16SITS'] == 'ITS']
sample_md = sample_md[sample_md['OfficeSample'] == 'yes']
replicate_ids = '''F2F.2.Ce.021
F2F.2.Ce.022
F2F.3.Ce.021
F2F.3.Ce.022
F2W.2.Ca.021
F2W.2.Ca.022
F2W.2.Ce.021
F2W.2.Ce.022
F3W.2.Ce.021
F3W.2.Ce.022
F1F.3.Ca.021
F1F.3.Ca.022
F1C.3.Ca.021
F1C.3.Ca.022
F1W.2.Ce.021
F1W.2.Ce.022
F1W.3.Dr.021
F1W.3.Dr.022
F1C.3.Dr.021
F1C.3.Dr.022
F2W.3.Dr.059
F3F.2.Ce.078'''.split('\n')
reps = sample_md[sample_md['Description'].isin(replicate_ids)]
reps = reps.drop(reps.drop_duplicates('Description').index).index
sample_md.drop(reps, inplace=True)
```
Load alpha diversity
----------------------
```
alpha_div_fp = '/home/johnchase/office-project/office-microbes/notebooks/UNITE-analysis/core_div/core_div_open/arare_max999/alpha_div_collated/observed_species.txt'
alpha_div = pd.read_csv(alpha_div_fp, sep='\t', index_col=0)
alpha_div = alpha_div.T.drop(['sequences per sample', 'iteration'])
alpha_cols = [e for e in alpha_div.columns if '990' in e]
alpha_div = alpha_div[alpha_cols]
sample_md = pd.concat([sample_md, alpha_div], axis=1, join='inner')
sample_md['MeanAlpha'] = sample_md[alpha_cols].mean(axis=1)
sample_md['MedianAlpha'] = sample_md[alpha_cols].median(axis=1)
alpha_div = pd.read_csv(alpha_div_fp, sep='\t', index_col=0)
alpha_div = alpha_div.T.drop(['sequences per sample', 'iteration'])
alpha_cols = [e for e in alpha_div.columns if '990' in e]
alpha_div = alpha_div[alpha_cols]
```
add alpha diversity to map
-------------
```
sample_md = pd.concat([sample_md, alpha_div], axis=1, join='inner')
sample_md['MeanAlpha'] = sample_md[alpha_cols].mean(axis=1)
```
Filter the samples so that only corresponding row 2, 3 samples are included
-----------------------------------------------------------
```
sample_md['NoRow'] = sample_md['Description'].apply(lambda x: x[:3] + x[5:])
row_df = sample_md[sample_md.duplicated('NoRow', keep=False)].copy()
row_df['SampleType'] = 'All Row 2/3 Pairs (n={0})'.format(int(len(row_df)/2))
plot_row_df = row_df[['Row', 'MeanAlpha', 'SampleType']]
sample_md_wall = row_df[row_df['PlateLocation'] != 'floor'].copy()
sample_md_wall['SampleType'] = 'Wall and Ceiling Pairs (n={0})'.format(int(len(sample_md_wall)/2))
plot_sample_md_wall = sample_md_wall[['Row', 'MeanAlpha', 'SampleType']]
sample_md_floor = row_df[row_df['PlateLocation'] == 'floor'].copy()
sample_md_floor['SampleType'] = 'Floor Pairs (n={0})'.format(int(len(sample_md_floor)/2))
plot_sample_md_floor = sample_md_floor[['Row', 'MeanAlpha', 'SampleType']]
plot_df = pd.concat([plot_row_df, plot_sample_md_wall, plot_sample_md_floor])
with plt.rc_context(dict(sns.axes_style("darkgrid"),
**sns.plotting_context("notebook", font_scale=2.5))):
plt.figure(figsize=(20, 11))
ax = sns.violinplot(x='SampleType', y='MeanAlpha', data=plot_df, hue='Row', hue_order=['3', '2'],
palette="YlGnBu")
ax.set_xlabel('')
handles, labels = ax.get_legend_handles_labels()
ax.set_ylabel('OTU Counts')
ax.set_title('OTU Counts')
ax.legend(handles, ['Frequent', 'Infrequent'], title='Sampling Frequency')
ax.get_legend().get_title().set_fontsize('15')
plt.savefig('figure-3-its-A.svg', dpi=300)
row_2_values = list(row_df[(row_df['Row'] == '2')]['MeanAlpha'])
row_3_values = list(row_df[(row_df['Row'] == '3')]['MeanAlpha'])
obs_t, param_p_val, perm_t_stats, nonparam_p_val = mc_t_two_sample(row_2_values, row_3_values)
obs_t, param_p_val
print((obs_t, param_p_val), "row 2 mean: {0}, row 1 mean: {1}".format(np.mean(row_2_values),np.mean(row_3_values)))
row_2_values = list(sample_md_wall[(sample_md_wall['Row'] == '2')]['MeanAlpha'])
row_3_values = list(sample_md_wall[(sample_md_wall['Row'] == '3')]['MeanAlpha'])
obs_t, param_p_val, perm_t_stats, nonparam_p_val = mc_t_two_sample(row_2_values, row_3_values)
print((obs_t, param_p_val), "row 2 mean: {0}, row 1 mean: {1}".format(np.mean(row_2_values),np.mean(row_3_values)))
row_2_values = list(sample_md_floor[(sample_md_floor['Row'] == '2')]['MeanAlpha'])
row_3_values = list(sample_md_floor[(sample_md_floor['Row'] == '3')]['MeanAlpha'])
obs_t, param_p_val, perm_t_stats, nonparam_p_val = mc_t_two_sample(row_2_values, row_3_values)
print((obs_t, param_p_val), "row 2 mean: {0}, row 1 mean: {1}".format(np.mean(row_2_values),np.mean(row_3_values)))
```
# Beta Diversity!
Create beta diversity boxplots of within- and between-group distances for row. It may not make a lot of sense doing this for all samples, as the location and/or city effect may drown out the row effect.
Load the distance matrix
----------------------
```
dm = skbio.DistanceMatrix.read(join(home, '/home/johnchase/office-project/office-microbes/notebooks/UNITE-analysis/core_div/core_div_open/bdiv_even999/binary_jaccard_dm.txt'))
```
Run PERMANOVA and record within/between distance values on various categories
----------------------
All of these will be based on the row 2, 3 paired samples, though they may be filtered to avoid confounding variables
###Row distances
```
filt_map = row_df[(row_df['City'] == 'flagstaff') & (row_df['Run'] == '2')]
filt_dm, filt_map = filter_dm_and_map(dm, filt_map)
row_dists = get_within_between_distances(filt_map, filt_dm, 'Row')
row_dists['Category'] = 'Row (n=198)'
permanova(filt_dm, filt_map, column='Row', permutations=999)
```
###Plate location
We can use the same samples for this as the previous test
```
plate_dists = get_within_between_distances(filt_map, filt_dm, 'PlateLocation')
plate_dists['Category'] = 'Plate Location (n=198)'
permanova(filt_dm, filt_map, column='PlateLocation', permutations=999)
```
###Run
```
filt_map = row_df[(row_df['City'] == 'flagstaff')]
filt_dm, filt_map = filter_dm_and_map(dm, filt_map)
run_dists = get_within_between_distances(filt_map, filt_dm, 'Run')
run_dists['Category'] = 'Run (n=357)'
permanova(filt_dm, filt_map, column='Run', permutations=999)
```
###Material
```
filt_map = row_df[(row_df['City'] == 'flagstaff') & (row_df['Run'] == '2')]
filt_dm, filt_map = filter_dm_and_map(dm, filt_map)
material_dists = get_within_between_distances(filt_map, filt_dm, 'Material')
material_dists['Category'] = 'Material (n=198)'
permanova(filt_dm, filt_map, column='Material', permutations=999)
all_dists = material_dists.append(row_dists).append(plate_dists).append(run_dists)
with plt.rc_context(dict(sns.axes_style("darkgrid"),
**sns.plotting_context("notebook", font_scale=1.8))):
plt.figure(figsize=(20,11))
ax = sns.boxplot(x="Category", y="Distance", hue="Groups", hue_order=['Within', 'Between'], data=all_dists, palette=sns.color_palette(['#f1fabb', '#2259a6']))
ax.set_ylim([0.9, 1.02])
ax.set_xlabel('')
ax.set_title('Binary-Jaccard')
plt.legend(loc='upper right')
plt.savefig('figure-3-its-B.svg', dpi=300)
dm = skbio.DistanceMatrix.read(join(home, '/home/johnchase/office-project/office-microbes/notebooks/UNITE-analysis/core_div/core_div_open/bdiv_even999/bray_curtis_dm.txt'))
```
##Row Distances
```
filt_map = row_df[(row_df['City'] == 'flagstaff') & (row_df['Run'] == '2')]
filt_dm, filt_map = filter_dm_and_map(dm, filt_map)
row_dists = get_within_between_distances(filt_map, filt_dm, 'Row')
row_dists['Category'] = 'Row (n=198)'
permanova(filt_dm, filt_map, column='Row', permutations=999)
```
##Plate Location
```
plate_dists = get_within_between_distances(filt_map, filt_dm, 'PlateLocation')
plate_dists['Category'] = 'Plate Location (n=198)'
permanova(filt_dm, filt_map, column='PlateLocation', permutations=999)
```
##Run
```
filt_map = row_df[(row_df['City'] == 'flagstaff')]
filt_dm, filt_map = filter_dm_and_map(dm, filt_map)
run_dists = get_within_between_distances(filt_map, filt_dm, 'Run')
run_dists['Category'] = 'Run (n=357)'
permanova(filt_dm, filt_map, column='Run', permutations=999)
```
##Material
```
filt_map = row_df[(row_df['City'] == 'flagstaff') & (row_df['Run'] == '2')]
filt_dm, filt_map = filter_dm_and_map(dm, filt_map)
material_dists = get_within_between_distances(filt_map, filt_dm, 'Material')
material_dists['Category'] = 'Material (n=198)'
permanova(filt_dm, filt_map, column='Material', permutations=999)
all_dists = material_dists.append(row_dists).append(plate_dists).append(run_dists)
with plt.rc_context(dict(sns.axes_style("darkgrid"),
**sns.plotting_context("notebook", font_scale=1.8))):
plt.figure(figsize=(20,11))
ax = sns.boxplot(x="Category", y="Distance", hue="Groups", hue_order=['Within', 'Between'], data=all_dists, palette=sns.color_palette(['#f1fabb', '#2259a6']))
ax.set_ylim([0.9, 1.02])
ax.set_xlabel('')
ax.set_title('Bray-Curtis')
plt.legend(loc='upper right')
plt.savefig('figure-3-its-C.svg', dpi=300)
```
ANCOM
-----
```
table_fp = join(home, 'core_div_out/table_even1000.txt')
table = pd.read_csv(table_fp, sep='\t', skiprows=1, index_col=0).T
table.index = table.index.astype(str)
table_ancom = table.loc[:, table[:3].sum(axis=0) > 0]
table_ancom = pd.DataFrame(multiplicative_replacement(table_ancom), index=table_ancom.index, columns=table_ancom.columns)
table_ancom.dropna(axis=0, inplace=True)
intersect_ids = set(row_md.index).intersection(set(table_ancom.index))
row_md_ancom = row_md.loc[intersect_ids, ]
table_ancom = table_ancom.loc[intersect_ids, ]
%time
results = ancom(table_ancom, row_md_ancom['Row'])
sigs = results[results['reject'] == True]
tax_fp = '/home/office-microbe-files/pick_otus_out_97/uclust_assigned_taxonomy/rep_set_tax_assignments.txt'
taxa_map = pd.read_csv(tax_fp, sep='\t', index_col=0, names=['Taxa', 'none', 'none'])
taxa_map.drop('none', axis=1, inplace=True)
taxa_map.index = taxa_map.index.astype(str)
taxa_map.loc[sigs.sort_values('W').index.astype(str)]
pd.options.display.max_colwidth = 200
sigs
np.mean(w_dm.data)
np.median(w_dm.data)
w_dm = skbio.DistanceMatrix.read(join(home, '/home/johnchase/office-project/office-microbes/notebooks/UNITE-analysis/core_div/core_div_closed/bdiv_even1000/bray_curtis_dm.txt'))
np.mean(w_dm.data)
np.median(w_dm.data)
4980239/22783729
foo
```
| github_jupyter |
```
import tensorflow as tf
print(tf.__version__)
import numpy as np
import matplotlib.pyplot as plt
def plot_series(time, series, format="-", start=0, end=None):
plt.plot(time[start:end], series[start:end], format)
plt.xlabel("Time")
plt.ylabel("Value")
plt.grid(True)
#!wget --no-check-certificate \
# https://raw.githubusercontent.com/jbrownlee/Datasets/master/daily-min-temperatures.csv \
# -O /tmp/daily-min-temperatures.csv
root = r'D:\Users\Arkady\Verint\Coursera_2019_Tensorflow_Specialization\Course4_Sequences_TimeSeries_Prediction'
srcurl = 'https://raw.githubusercontent.com/jbrownlee/Datasets/master/daily-min-temperatures.csv'
#import pandas as pd
#df = pd.read_csv(srcurl)
#df.to_csv(root + '/tmp/daily-min-temperatures.csv')
import csv
time_step = []
temps = []
with open(root + '/tmp/daily-min-temperatures.csv') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
next(reader)
step=0
for row in reader:
temps.append(float(row[2]))
time_step.append(step)
step = step + 1
series = np.array(temps)
time = np.array(time_step)
plt.figure(figsize=(10, 6))
plot_series(time, series)
split_time = 2500
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
window_size = 30
batch_size = 32
shuffle_buffer_size = 1000
def windowed_dataset(series, window_size, batch_size, shuffle_buffer):
series = tf.expand_dims(series, axis=-1)
ds = tf.data.Dataset.from_tensor_slices(series)
ds = ds.window(window_size + 1, shift=1, drop_remainder=True)
ds = ds.flat_map(lambda w: w.batch(window_size + 1))
ds = ds.shuffle(shuffle_buffer)
ds = ds.map(lambda w: (w[:-1], w[1:]))
return ds.batch(batch_size).prefetch(1)
def model_forecast(model, series, window_size):
ds = tf.data.Dataset.from_tensor_slices(series)
ds = ds.window(window_size, shift=1, drop_remainder=True)
ds = ds.flat_map(lambda w: w.batch(window_size))
ds = ds.batch(32).prefetch(1)
forecast = model.predict(ds)
return forecast
tf.keras.backend.clear_session()
tf.random.set_seed(51)
np.random.seed(51)
window_size = 64
batch_size = 256
train_set = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)
print(train_set)
print(x_train.shape)
model = tf.keras.models.Sequential([
tf.keras.layers.Conv1D(filters=32, kernel_size=5,
strides=1, padding="causal",
activation="relu",
input_shape=[None, 1]),
tf.keras.layers.LSTM(64, return_sequences=True),
tf.keras.layers.LSTM(64, return_sequences=True),
tf.keras.layers.Dense(30, activation="relu"),
tf.keras.layers.Dense(10, activation="relu"),
tf.keras.layers.Dense(1),
tf.keras.layers.Lambda(lambda x: x * 400)
])
lr_schedule = tf.keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-8 * 10**(epoch / 20))
optimizer = tf.keras.optimizers.SGD(lr=1e-8, momentum=0.9)
model.compile(loss=tf.keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-8, 1e-4, 0, 60])
tf.keras.backend.clear_session()
tf.random.set_seed(51)
np.random.seed(51)
train_set = windowed_dataset(x_train, window_size=60, batch_size=100, shuffle_buffer=shuffle_buffer_size)
model = tf.keras.models.Sequential([
tf.keras.layers.Conv1D(filters=60, kernel_size=5,
strides=1, padding="causal",
activation="relu",
input_shape=[None, 1]),
tf.keras.layers.LSTM(60, return_sequences=True),
tf.keras.layers.LSTM(60, return_sequences=True),
tf.keras.layers.Dense(30, activation="relu"),
tf.keras.layers.Dense(10, activation="relu"),
tf.keras.layers.Dense(1),
tf.keras.layers.Lambda(lambda x: x * 400)
])
optimizer = tf.keras.optimizers.SGD(lr=1e-5, momentum=0.9)
model.compile(loss=tf.keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set,epochs=150)
rnn_forecast = model_forecast(model, series[..., np.newaxis], window_size)
rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, rnn_forecast)
tf.keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy()
print(rnn_forecast)
```
| github_jupyter |
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import h5py
archive = h5py.File('/Users/bmmorris/git/aesop/notebooks/spectra.hdf5', 'r+')
targets = list(archive)
list(archive['HD122120'])#['2017-09-11T03:27:13.140']['flux'][:]
from scipy.ndimage import gaussian_filter1d
spectrum1 = archive['HATP11']['2017-06-12T07:28:06.310'] # K4
spectrum2 = archive['HD110833']['2017-03-17T05:47:24.899'] # K3
spectrum3 = archive['HD122120']['2017-06-15T03:52:13.690'] # K5
wavelength1 = spectrum1['wavelength'][:]
flux1 = spectrum1['flux'][:]
wavelength2 = spectrum2['wavelength'][:]
flux2 = spectrum2['flux'][:]
wavelength3 = spectrum3['wavelength'][:]
flux3 = spectrum3['flux'][:]
plt.plot(wavelength1, flux1)
plt.plot(wavelength2, gaussian_filter1d(flux2, 1))# + 0.2)
plt.plot(wavelength3, gaussian_filter1d(flux3, 1))# + 0.4)
plt.ylim([0.5, 1.1])
#plt.xlim([3900, 4000])
# plt.xlim([7035, 7075])
plt.xlim([8850, 8890])
import sys
sys.path.insert(0, '../')
from toolkit import SimpleSpectrum
import astropy.units as u
target = SimpleSpectrum(wavelength1, flux1, dispersion_unit=u.Angstrom)
source1 = SimpleSpectrum(wavelength2, flux2, dispersion_unit=u.Angstrom)
source2 = SimpleSpectrum(wavelength3, flux3, dispersion_unit=u.Angstrom)
from toolkit import instr_model
from toolkit import slice_spectrum, concatenate_spectra, bands_TiO
spec_band = []
first_n_bands = 5
width = 5
for band in bands_TiO[:first_n_bands]:
target_slice = slice_spectrum(target, band.min-width*u.Angstrom, band.max+width*u.Angstrom)
target_slice.flux /= target_slice.flux.max()
spec_band.append(target_slice)
target_slices = concatenate_spectra(spec_band)
target_slices.plot(color='k', lw=2, marker='.')
spec_band = []
for band, inds in zip(bands_TiO[:first_n_bands], target_slices.wavelength_splits):
target_slice = slice_spectrum(source1, band.min-width*u.Angstrom, band.max+width*u.Angstrom,
force_length=abs(np.diff(inds))[0])
target_slice.flux /= target_slice.flux.max()
spec_band.append(target_slice)
source1_slices = concatenate_spectra(spec_band)
source1_slices.plot(color='r', lw=2, marker='.')
spec_band = []
for band, inds in zip(bands_TiO[:first_n_bands], target_slices.wavelength_splits):
target_slice = slice_spectrum(source2, band.min-width*u.Angstrom, band.max+width*u.Angstrom,
force_length=abs(np.diff(inds))[0])
target_slice.flux /= target_slice.flux.max()
spec_band.append(target_slice)
source2_slices = concatenate_spectra(spec_band)
source2_slices.plot(color='b', lw=2, marker='.')
def plot_spliced_spectrum(observed_spectrum, model_flux, other_model=None):
n_chunks = len(observed_spectrum.wavelength_splits)
fig, ax = plt.subplots(n_chunks, 1, figsize=(8, 10))
for i, inds in enumerate(observed_spectrum.wavelength_splits):
min_ind, max_ind = inds
ax[i].errorbar(observed_spectrum.wavelength[min_ind:max_ind].value,
observed_spectrum.flux[min_ind:max_ind],
0.025*np.ones(max_ind-min_ind))
ax[i].plot(observed_spectrum.wavelength[min_ind:max_ind],
model_flux[min_ind:max_ind])
if other_model is not None:
ax[i].plot(observed_spectrum.wavelength[min_ind:max_ind],
other_model[min_ind:max_ind], alpha=0.4)
ax[i].set_xlim([observed_spectrum.wavelength[min_ind].value,
observed_spectrum.wavelength[max_ind-1].value])
ax[i].set_ylim([0.9*observed_spectrum.flux[min_ind:max_ind].min(),
1.1])
return fig, ax
plot_spliced_spectrum(target_slices, source1_slices.flux, source2_slices.flux)
model, resid = instr_model(target_slices, source1_slices, source2_slices, np.log(0.5), 1, 1, 0, 0, 0, 0, 0)
plt.plot(target_slices.flux - model)
# from scipy.optimize import fmin_l_bfgs_b
# def chi2(p, target, temp_phot, temp_spot):
# spotted_area, lam_offset0, lam_offset1, lam_offset2, res = p
# lam_offsets = [lam_offset0, lam_offset1, lam_offset1]
# model, residuals = instr_model(target, temp_phot, temp_spot, spotted_area,
# res, *lam_offsets)
# return residuals
# bounds = [[-30, 0], [-2, 2], [-2, 2], [-2, 2], [1, 15]]
# initp = [np.log(0.03), 0.0, 0.0, 0.0, 1]
# bfgs_options_fast = dict(epsilon=1e-3, approx_grad=True,
# m=10, maxls=20)
# bfgs_options_precise = dict(epsilon=1e-3, approx_grad=True,
# m=30, maxls=50)
# result = fmin_l_bfgs_b(chi2, initp, bounds=bounds,
# args=(target_slices, source1_slices, source2_slices),
# **bfgs_options_precise)
# #**bfgs_options_fast)
# model, resid = instr_model(target_slices, source1_slices, source2_slices, *result[0])
# plot_spliced_spectrum(target_slices, model)
import emcee
yerr = 0.01
def random_in_range(min, max):
return (max-min)*np.random.rand(1)[0] + min
def lnprior(theta):
log_spotted_area, res = theta[:2]
dlambdas = theta[2:]
if (-15 < log_spotted_area <= 0 and 0. <= res < 3 and all([-3 < dlambda < 3 for dlambda in dlambdas])):
return 0.0
return -np.inf
def lnlike(theta, target, source1, source2):
log_spotted_area, res = theta[:2]
dlambdas = theta[2:]
model, residuals = instr_model(target, source1, source2, np.exp(log_spotted_area),
res, *dlambdas)
return -0.5*residuals/yerr**2
def lnprob(theta, target, source1, source2):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, target, source1, source2)
from emcee import EnsembleSampler
dlam_init = -0.2
# initp = np.array([np.log(0.01), 1, dlam_init, dlam_init, dlam_init, dlam_init, dlam_init])
ndim, nwalkers = 6, 30
pos = []
counter = -1
while len(pos) < nwalkers:
realization = [random_in_range(-10, -8), random_in_range(0, 1),
random_in_range(dlam_init-0.1, dlam_init+0.1), random_in_range(dlam_init-0.1, dlam_init+0.1),
random_in_range(dlam_init-0.1, dlam_init+0.1), random_in_range(dlam_init-0.1, dlam_init+0.1)]
if np.isfinite(lnprior(realization)):
pos.append(realization)
sampler = EnsembleSampler(nwalkers, ndim, lnprob, threads=8,
args=(target_slices, source1_slices, source2_slices))
sampler.run_mcmc(pos, 4000);
from corner import corner
samples = sampler.chain[:, 1500:, :].reshape((-1, ndim))
corner(samples, labels=['$\log f_s$', '$R$', '$\Delta \lambda_0$', '$\Delta \lambda_1$',
'$\Delta \lambda_2$', '$\Delta \lambda_3$']);#, '$\Delta \lambda_4$']);
best_params = sampler.flatchain[np.argmax(sampler.flatlnprobability, axis=0), :]
best_model = instr_model(target_slices, source1_slices, source2_slices,
*best_params)[0]
best_params
# maximum spotted area
np.exp(np.percentile(samples[:, 0], 98))
n_chunks = len(target_slices.wavelength_splits)
fig, ax = plt.subplots(n_chunks, 1, figsize=(8, 10))
from copy import deepcopy
from toolkit.analysis import gaussian_kernel
for i, inds in enumerate(target_slices.wavelength_splits):
min_ind, max_ind = inds
ax[i].errorbar(target_slices.wavelength[min_ind:max_ind].value,
target_slices.flux[min_ind:max_ind],
yerr*np.ones_like(target_slices.flux[min_ind:max_ind]),
fmt='o', color='k')
#0.025*np.ones(max_ind-min_ind), fmt='.')
ax[i].plot(target_slices.wavelength[min_ind:max_ind],
best_model[min_ind:max_ind], color='r')
ax[i].set_xlim([target_slices.wavelength[min_ind].value,
target_slices.wavelength[max_ind-1].value])
#ax[i].set_ylim([0.9*target_slices.flux[min_ind:max_ind].min(),
# 1.1])
n_random_draws = 100
# draw models from posteriors
for j in range(n_random_draws):
step = np.random.randint(0, samples.shape[0])
random_step = samples[step, :]
rand_model = instr_model(target_slices, source1_slices, source2_slices, *random_step)[0]
for i, inds in enumerate(target_slices.wavelength_splits):
min_ind, max_ind = inds
ax[i].plot(target_slices.wavelength[min_ind:max_ind],
rand_model[min_ind:max_ind], color='#389df7', alpha=0.1)
```
| github_jupyter |
## Mixture Density Networks with PyTorch ##
Related posts:
JavaScript [implementation](http://blog.otoro.net/2015/06/14/mixture-density-networks/).
TensorFlow [implementation](http://blog.otoro.net/2015/11/24/mixture-density-networks-with-tensorflow/).
```
import matplotlib.pyplot as plt
import numpy as np
import torch
import math
from torch.autograd import Variable
import torch.nn as nn
```
### Simple Data Fitting ###
Before we talk about MDN's, we try to perform some simple data fitting using PyTorch to make sure everything works. To get started, let's try to quickly build a neural network to fit some fake data. As neural nets of even one hidden layer can be universal function approximators, we can see if we can train a simple neural network to fit a noisy sinusoidal data, like this ( $\epsilon$ is just standard gaussian random noise):
$y=7.0 \sin( 0.75 x) + 0.5 x + \epsilon$
After importing the libraries, we generate the sinusoidal data we will train a neural net to fit later:
```
NSAMPLE = 1000
x_data = np.float32(np.random.uniform(-10.5, 10.5, (1, NSAMPLE))).T
r_data = np.float32(np.random.normal(size=(NSAMPLE,1)))
y_data = np.float32(np.sin(0.75*x_data)*7.0+x_data*0.5+r_data*1.0)
plt.figure(figsize=(8, 8))
plot_out = plt.plot(x_data,y_data,'ro',alpha=0.3)
plt.show()
```
We will define this simple neural network one-hidden layer and 100 nodes:
$Y = W_{out} \max( W_{in} X + b_{in}, 0) + b_{out}$
```
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
# from (https://github.com/jcjohnson/pytorch-examples)
N, D_in, H, D_out = NSAMPLE, 1, 100, 1
# Create random Tensors to hold inputs and outputs, and wrap them in Variables.
# since NSAMPLE is not large, we train entire dataset in one minibatch.
x = Variable(torch.from_numpy(x_data.reshape(NSAMPLE, D_in)))
y = Variable(torch.from_numpy(y_data.reshape(NSAMPLE, D_out)), requires_grad=False)
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
```
We can define a loss function as the sum of square error of the output vs the data (we can add regularisation if we want).
```
loss_fn = torch.nn.MSELoss()
```
We will also define a training loop to minimise the loss function later. We can use the RMSProp gradient descent optimisation method.
```
learning_rate = 0.01
optimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate, alpha=0.8)
for t in range(100000):
y_pred = model(x)
loss = loss_fn(y_pred, y)
if (t % 10000 == 0):
print(t, loss.data[0])
optimizer.zero_grad()
loss.backward()
optimizer.step()
x_test = np.float32(np.random.uniform(-10.5, 10.5, (1, NSAMPLE))).T
x_test = Variable(torch.from_numpy(x_test.reshape(NSAMPLE, D_in)))
y_test = model(x_test)
plt.figure(figsize=(8, 8))
plt.plot(x_data,y_data,'ro', x_test.data.numpy(),y_test.data.numpy(),'bo',alpha=0.3)
plt.show()
```
We see that the neural network can fit this sinusoidal data quite well, as expected. However, this type of fitting method only works well when the function we want to approximate with the neural net is a one-to-one, or many-to-one function. Take for example, if we invert the training data:
$x=7.0 \sin( 0.75 y) + 0.5 y+ \epsilon$
```
temp_data = x_data
x_data = y_data
y_data = temp_data
plt.figure(figsize=(8, 8))
plot_out = plt.plot(x_data,y_data,'ro',alpha=0.3)
plt.show()
```
If we were to use the same method to fit this inverted data, obviously it wouldn't work well, and we would expect to see a neural network trained to fit only to the square mean of the data.
```
x = Variable(torch.from_numpy(x_data.reshape(NSAMPLE, D_in)))
y = Variable(torch.from_numpy(y_data.reshape(NSAMPLE, D_out)), requires_grad=False)
learning_rate = 0.01
optimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate, alpha=0.8)
for t in range(3000):
y_pred = model(x)
loss = loss_fn(y_pred, y)
if (t % 300 == 0):
print(t, loss.data[0])
optimizer.zero_grad()
loss.backward()
optimizer.step()
x_test = np.float32(np.random.uniform(-10.5, 10.5, (1, NSAMPLE))).T
x_test = Variable(torch.from_numpy(x_test.reshape(NSAMPLE, D_in)))
y_test = model(x_test)
plt.figure(figsize=(8, 8))
plt.plot(x_data,y_data,'ro', x_test.data.numpy(),y_test.data.numpy(),'bo',alpha=0.3)
plt.show()
```
Our current model only predicts one output value for each input, so this approach will fail miserably. What we want is a model that has the capacity to predict a range of different output values for each input. In the next section we implement a Mixture Density Network (MDN) to achieve this task.
## Mixture Density Networks ##
As noted above, our current model predicts only one output value for each input, so this approach will fail. What we want is a model that has the capacity to predict a range of different output values for each input. In this section we implement a *Mixture Density Network (MDN)* to achieve this task.
Mixture Density Networks, developed by Christopher Bishop in the 1990s, are an attempt to address this problem. Rather than have the network predict a single output value, the MDN predicts an entire *probability distribution* of the output, so we can sample several possible different output values for a given input.
This concept is quite powerful, and can be employed in many current areas of machine learning research. It also allows us to calculate some sort of confidence factor in the predictions that the network is making.
The inverted sinusoidal data we chose is not just a toy problem: there are applications in the field of robotics, for example, where we want to determine which angle to move a robot arm to reach a target location. MDNs are also used to model handwriting, where the next stroke is drawn from a probability distribution of multiple possibilities, rather than sticking to one prediction.
Bishop's implementation of MDNs will predict a class of probability distributions called Mixture Gaussian distributions, where the output value is modelled as a sum of many gaussian random values, each with different means and standard deviations. So for each input $x$, we will predict a probability distribution function $P(Y = y | X = x)$ that is approximated by a weighted sum of different gaussian distributions.
$P(Y = y | X = x) = \sum_{k=0}^{K-1} \Pi_{k}(x) \phi(y, \mu_{k}(x), \sigma_{k}(x)), \sum_{k=0}^{K-1} \Pi_{k}(x) = 1$
Our network will therefore predict the *parameters* of the pdf, in our case the set of $\mu$, $\sigma$, and $\Pi$ values for each input $x$. Rather than predict $y$ directly, we will need to sample from our distribution to sample $y$. This will allow us to have multiple possible values of $y$ for a given $x$.
Each of the parameters $\Pi_{k}(x), \mu_{k}(x), \sigma_{k}(x)$ of the distribution will be determined by the neural network, as a function of the input $x$. There is a restriction that the sum of $\Pi_{k}(x)$ add up to one, to ensure that the pdf integrates to 1. In addition, $\sigma_{k}(x)$ must be strictly positive.
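To make the sampling step concrete before moving to the network itself, here is a tiny standalone numpy sketch (all parameter values invented for illustration) of drawing several $y$ values from a one-dimensional Gaussian mixture given $\Pi$, $\mu$ and $\sigma$:
```
import numpy as np

# Hypothetical mixture parameters for a single input x
pi = np.array([0.7, 0.2, 0.1])      # mixture weights, must sum to 1
mu = np.array([-2.0, 0.0, 3.0])     # component means
sigma = np.array([0.5, 1.0, 0.3])   # component standard deviations

# Pick a component index according to pi, then sample from that gaussian
k = np.random.choice(len(pi), size=5, p=pi)
y_samples = np.random.randn(5) * sigma[k] + mu[k]
print(y_samples)                    # several plausible y values for the same x
```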
In our implementation, we will use a neural network of one hidden layer with 100 nodes, and also generate 20 mixtures, hence there will be 60 actual outputs of our neural network for a single input. Our definition will be split into 2 parts:
$Z = W_{out} \max( W_{in} X + b_{in}, 0) + b_{out}$
In the first part, $Z$ is a vector of 60 values that will then be split up into three equal parts, $[Z_{\Pi}, Z_{\sigma}, Z_{\mu}] = Z$, where each of $Z_{\Pi}$, $Z_{\sigma}$, $Z_{\mu}$ are vectors of length 20.
In this PyTorch implementation, unlike the TF version, we will implement this operation with three separate Linear layers, rather than splitting a large $Z$, for clarity:
$Z_{\Pi} = W_{\Pi} \max( W_{in} X + b_{in}, 0) + b_{\Pi}$
$Z_{\sigma} = W_{\sigma} \max( W_{in} X + b_{in}, 0) + b_{\sigma}$
$Z_{\mu} = W_{\mu} \max( W_{in} X + b_{in}, 0) + b_{\mu}$
In the second part, the parameters of the pdf will be defined as below to satisfy the earlier conditions:
$\Pi = \frac{\exp(Z_{\Pi})}{\sum_{i=0}^{K-1} \exp(Z_{\Pi, i})}, \\ \sigma = \exp(Z_{\sigma}), \\ \mu = Z_{\mu}$
$\Pi_{k}$ are put into a *softmax* operator to ensure that the sum adds to one, and that each mixture probability is positive. Each $\sigma_{k}$ will also be positive due to the exponential operator.
Below is the PyTorch implementation of the MDN network:
```
NHIDDEN = 100 # hidden units
KMIX = 20 # number of mixtures
class MDN(nn.Module):
def __init__(self, hidden_size, num_mixtures):
super(MDN, self).__init__()
self.fc_in = nn.Linear(1, hidden_size)
self.relu = nn.ReLU()
self.pi_out = torch.nn.Sequential(
nn.Linear(hidden_size, num_mixtures),
nn.Softmax()
)
self.sigma_out = nn.Linear(hidden_size, num_mixtures)
self.mu_out = nn.Linear(hidden_size, num_mixtures)
def forward(self, x):
out = self.fc_in(x)
out = self.relu(out)
out_pi = self.pi_out(out)
out_sigma = torch.exp(self.sigma_out(out))
out_mu = self.mu_out(out)
return (out_pi, out_sigma, out_mu)
```
Let's define the inverted data we want to train our MDN to predict later. As this is a more involved prediction task, I used a higher number of samples compared to the simple data fitting task earlier.
```
NSAMPLE = 2500
y_data = np.float32(np.random.uniform(-10.5, 10.5, (1, NSAMPLE))).T
r_data = np.float32(np.random.normal(size=(NSAMPLE,1))) # random noise
x_data = np.float32(np.sin(0.75*y_data)*7.0+y_data*0.5+r_data*1.0)
x_train = Variable(torch.from_numpy(x_data.reshape(NSAMPLE, 1)))
y_train = Variable(torch.from_numpy(y_data.reshape(NSAMPLE, 1)), requires_grad=False)
plt.figure(figsize=(8, 8))
plt.plot(x_train.data.numpy(),y_train.data.numpy(),'ro', alpha=0.3)
plt.show()
```
We cannot simply use the mean squared error (L2) loss function for this task, because the output is an entire description of the probability distribution. A more suitable loss function is to minimise the negative logarithm of the likelihood of the distribution given the training data:
$CostFunction(y | x) = -\log[ \sum_{k}^K \Pi_{k}(x) \phi(y, \mu(x), \sigma(x)) ]$
So for every $(x,y)$ point in the training data set, we can compute a cost function based on the predicted distribution versus the actual points, and then attempt to minimise the sum of all the costs combined. To those who are familiar with logistic regression and cross entropy minimisation of softmax, this is a similar approach, but with non-discretised states.
We have to implement this cost function ourselves:
```
oneDivSqrtTwoPI = 1.0 / math.sqrt(2.0*math.pi) # normalisation factor for gaussian.
def gaussian_distribution(y, mu, sigma):
# broadcast subtraction with the mean, then normalise by sigma
result = (y.expand_as(mu) - mu) * torch.reciprocal(sigma)
result = - 0.5 * (result * result)
return (torch.exp(result) * torch.reciprocal(sigma)) * oneDivSqrtTwoPI
def mdn_loss_function(out_pi, out_sigma, out_mu, y):
epsilon = 1e-3
result = gaussian_distribution(y, out_mu, out_sigma) * out_pi
result = torch.sum(result, dim=1)
result = - torch.log(epsilon + result)
return torch.mean(result)
```
Let's define our model, and use the Adam optimizer to train our model below:
```
model = MDN(hidden_size=NHIDDEN, num_mixtures=KMIX)
learning_rate = 0.00001
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
for t in range(20000):
(out_pi, out_sigma, out_mu) = model(x_train)
loss = mdn_loss_function(out_pi, out_sigma, out_mu, y_train)
if (t % 1000 == 0):
print(t, loss.data[0])
optimizer.zero_grad()
loss.backward()
optimizer.step()
```
We want to use our network to generate the parameters of the pdf for us to sample from. In the code below, we will sample $M=10$ values of $y$ for every $x$ input, and compare the sampled results with the training data.
```
x_test_data = np.float32(np.random.uniform(-15, 15, (1, NSAMPLE))).T
x_test = Variable(torch.from_numpy(x_test_data.reshape(NSAMPLE, 1)))
(out_pi_test, out_sigma_test, out_mu_test) = model(x_test)
out_pi_test_data = out_pi_test.data.numpy()
out_sigma_test_data = out_sigma_test.data.numpy()
out_mu_test_data = out_mu_test.data.numpy()
def get_pi_idx(x, pdf):
N = pdf.size
accumulate = 0
for i in range(0, N):
accumulate += pdf[i]
if (accumulate >= x):
return i
print('error with sampling ensemble')
return -1
def generate_ensemble(M = 10):
# for each point in X, generate M=10 ensembles
NTEST = x_test_data.size
result = np.random.rand(NTEST, M) # initially random [0, 1]
rn = np.random.randn(NTEST, M) # normal random matrix (0.0, 1.0)
mu = 0
std = 0
idx = 0
# transforms result into random ensembles
for j in range(0, M):
for i in range(0, NTEST):
idx = get_pi_idx(result[i, j], out_pi_test_data[i])
mu = out_mu_test_data[i, idx]
std = out_sigma_test_data[i, idx]
result[i, j] = mu + rn[i, j]*std
return result
y_test_data = generate_ensemble()
plt.figure(figsize=(8, 8))
plt.plot(x_test_data,y_test_data,'b.', x_data,y_data,'r.',alpha=0.3)
plt.show()
```
In the above graph, we plot out the generated data we sampled from the MDN distribution, in blue. We also plot the original training data in red over the predictions. Apart from a few outliers, the distributions seem to match the data. We can also plot a graph of $\mu(x)$ as well to interpret what the neural net is actually doing:
```
plt.figure(figsize=(8, 8))
plt.plot(x_test_data,out_mu_test_data,'g.', x_data,y_data,'r.',alpha=0.3)
plt.show()
```
In the plot above, we see that for every point on the $x$-axis, there are multiple lines or states where $y$ may be, and we select these states with probabilities modelled by $\Pi$ .
| github_jupyter |
# Test For The Best Machine Learning Algorithm For Prediction
This notebook takes about 40 minutes to run, but we've already run it and saved the data for you. Please read through it, though, so that you understand how we came to the conclusions we'll use moving forward.
## Six Algorithms
We're going to compare six different algorithms to determine the best one to produce an accurate model for our predictions.
### Logistic Regression
Logistic Regression (LR) is a technique borrowed from the field of statistics. It is the go-to method for binary classification problems (problems with two class values).

Logistic Regression is named for the function used at the core of the method: the logistic function. The logistic function maps a linear combination of the input features to a probability; here, that probability can be read as the chance that a given driver wins. In other words, Logistic Regression predicts probabilities rather than hard class labels.
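As a minimal, hedged illustration (the tiny dataset below is invented and is not the race data), scikit-learn's `LogisticRegression` exposes these probabilities through `predict_proba`:
```
import numpy as np
from sklearn.linear_model import LogisticRegression

# Toy one-feature example: label 1 when the feature is large
X_toy = np.array([[0.5], [1.0], [1.5], [3.0], [3.5], [4.0]])
y_toy = np.array([0, 0, 0, 1, 1, 1])

clf = LogisticRegression().fit(X_toy, y_toy)
print(clf.predict_proba([[2.0], [3.8]]))  # probability of each class, from the logistic function
print(clf.predict([[2.0], [3.8]]))        # hard labels obtained by thresholding those probabilities
```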
### Decision Tree
A tree has many analogies in real life, and it turns out that it has influenced a wide area of machine learning, covering both classification and regression. In decision analysis, a decision tree can be used to visually and explicitly represent decisions and decision making.

This methodology is more commonly known as a "learning decision tree" from data, and the above tree is called a Classification tree because the goal is to classify a driver as the winner or not.
### Random Forest
Random forest is a supervised learning algorithm. The "forest" it builds is an **ensemble of decision trees**, usually trained with the “bagging” method, a combination of learning models which increases the accuracy of the result.
A random forest mitigates the limitations of a single decision tree: it reduces overfitting, increases precision, and generates predictions without requiring much configuration.

Here's the difference between the Decision Tree and Random Forest methods:

### Support Vector Machine Algorithm (SVC)
Support Vector Machines (SVMs) are a set of supervised learning methods used for classification, regression and detection of outliers.
The advantages of support vector machines are:
- Effective in high dimensional spaces
- Still effective in cases where the number of dimensions is greater than the number of samples
- Uses a subset of training points in the decision function (called support vectors), so it is also memory efficient
- Versatile: different kernel functions can be specified for the decision function. Common kernels are provided, but it is also possible to specify custom kernels
The objective of an SVC (Support Vector Classifier) is to fit to the data you provide, returning a "best fit" hyperplane that divides, or categorizes, your data.
### Gaussian Naive Bayes Algorithm
Naive Bayes is a classification algorithm for binary (two-class) and multi-class classification problems. The technique is easiest to understand when described using binary or categorical input values. The representation used for naive Bayes is probabilities.
A learned Naive Bayes model stores a list of probabilities. This includes:
- **Class Probabilities:** The probabilities of each class in the training dataset.
- **Conditional Probabilities:** The conditional probabilities of each input value given each class value.
Naive Bayes can be extended to real-value attributes, most commonly by assuming a Gaussian distribution. This extension of Naive Bayes is called Gaussian Naive Bayes. Other functions can be used to estimate the distribution of the data, but the Gaussian (or normal distribution) is the easiest to work with because you only need to estimate the mean and the standard deviation from your training data.
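As a rough sketch on invented toy data (not the race data), scikit-learn's `GaussianNB` exposes the learned class priors and per-class feature means directly:
```
import numpy as np
from sklearn.naive_bayes import GaussianNB

# Hypothetical two-feature toy data with two classes
X_toy = np.array([[1.0, 2.0], [1.2, 1.8], [3.0, 4.0], [3.2, 4.1]])
y_toy = np.array([0, 0, 1, 1])

gnb = GaussianNB().fit(X_toy, y_toy)
print(gnb.class_prior_)                 # class probabilities estimated from the labels
print(gnb.theta_)                       # per-class mean of each feature (the gaussian parameters)
print(gnb.predict_proba([[2.0, 3.0]]))  # combined conditional probabilities for a new sample
```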
### k Nearest Neighbor Algorithm (kNN)
The k-Nearest Neighbors (KNN) algorithm is a simple, supervised machine learning algorithm that can be used to solve both classification and regression problems.
kNN works by finding the distances between a query and all of the examples in the data, selecting the specified number of examples (k) closest to the query, and then voting for the most frequent label (in the case of classification) or averaging the labels (in the case of regression).
The kNN algorithm assumes similarity between the new case and the available cases, and puts the new case into the category that is most similar to the available categories.

## Analyzing the Data
### Feature Importance
Another great quality of the random forest algorithm is that it's easy to measure the relative importance of each feature to the prediction.
The Scikit-learn Python library provides a great tool for this, which measures a feature's importance by looking at how much the tree nodes that use that feature reduce impurity across all trees in the forest. It computes this score automatically for each feature after training and scales the results so that the sum of all importances is equal to one.
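As a hedged sketch on a made-up frame (the column names merely echo the real ones), reading importances off a fitted forest looks like this:
```
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier

# Hypothetical stand-in for the real feature matrix and position clusters
rng = np.random.RandomState(1)
X_toy = pd.DataFrame({'quali_pos': rng.randint(1, 21, 200),
                      'driver_confidence': rng.rand(200),
                      'constructor_reliability': rng.rand(200)})
y_toy = rng.randint(1, 4, 200)

forest = RandomForestClassifier(n_estimators=100, random_state=1).fit(X_toy, y_toy)
importances = pd.Series(forest.feature_importances_, index=X_toy.columns).sort_values(ascending=False)
print(importances)   # the values sum to 1.0
```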
### Data Visualization When Building a Model
How do you visualize the influence of the data? How do you frame the problem?
An important tool in the data scientist's toolkit is the power to visualize data using several excellent libraries such as Seaborn or MatPlotLib. Representing your data visually might allow you to uncover hidden correlations that you can leverage. Your visualizations might also help you to uncover bias or unbalanced data.

### Splitting the Dataset
Prior to training, you need to split your dataset into two or more parts of unequal size that still represent the data well.
1. Training. This part of the dataset is fit to your model to train it. This set constitutes the majority of the original dataset.
2. Testing. A test dataset is an independent group of data, often a subset of the original data, that you use to confirm the performance of the model you built.
3. Validating. A validation set is a smaller independent group of examples that you use to tune the model's hyperparameters, or architecture, to improve the model. Depending on your data's size and the question you are asking, you might not need to build this third set.
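A minimal, hedged sketch of such a split with scikit-learn's `train_test_split` (the arrays are invented for illustration):
```
import numpy as np
from sklearn.model_selection import train_test_split

# Hypothetical arrays standing in for the real features and labels
X_demo = np.arange(20).reshape(10, 2)
y_demo = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])

# Hold out 30% for testing; stratify keeps the class balance similar in both parts
X_train, X_test, y_train, y_test = train_test_split(
    X_demo, y_demo, test_size=0.3, random_state=1, stratify=y_demo)
print(X_train.shape, X_test.shape)
```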
## Building the Model
Using your training data, your goal is to build a model, or a statistical representation of your data, using various algorithms to train it. Training a model exposes it to data and allows it to make assumptions about perceived patterns it discovers, validates, and accepts or rejects.
### Decide on a Training Method
Depending on your question and the nature of your data, you will choose a method to train it. Stepping through Scikit-learn's documentation, you can explore many ways to train a model. Depending on the results you get, you might have to try several different methods to build the best model. You are likely to go through a process whereby data scientists evaluate the performance of a model by feeding it unseen data, checking for accuracy, bias, and other quality-degrading issues, and selecting the most appropriate training method for the task at hand.
### Train a Model
Armed with your training data, you are ready to "fit" it to create a model. In many ML libraries you will find the code 'model.fit' - it is at this time that you send in your data as an array of values (usually 'X') and a feature variable (usually 'y').
### Evaluate the Model
Once the training process is complete, you will be able to evaluate the model's quality by using test data to gauge its performance. This data is a subset of the original data that the model has not previously analyzed. You can print out a table of metrics about your model's quality.
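A compact sketch of the fit-then-evaluate cycle on invented data (the notebook itself uses cross-validation rather than a single hold-out split):
```
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix

# Hypothetical data with three classes, echoing the position clusters used later
rng = np.random.RandomState(1)
X_demo = rng.rand(300, 4)
y_demo = rng.randint(1, 4, 300)

X_train, X_test, y_train, y_test = train_test_split(X_demo, y_demo, test_size=0.2, random_state=1)
model = RandomForestClassifier(random_state=1)
model.fit(X_train, y_train)            # training: fit the model to the training split
y_pred = model.predict(X_test)         # evaluation: predict on data the model has not seen
print(accuracy_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
```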
#### Model Fitting
In the Machine Learning context, model fitting refers to the accuracy of the model's underlying function as it attempts to analyze data with which it is not familiar.
#### Underfitting and Overfitting
Underfitting and overfitting are common problems that degrade the quality of the model, as the model either doesn't fit well enough, or it fits too well. This causes the model to make predictions either too closely aligned or too loosely aligned with its training data. An overfit model predicts training data too well because it has learned the data's details and noise too well. An underfit model is not accurate as it can neither accurately analyze its training data nor data it has not yet 'seen'.

Let's test out some algorithms to choose our path for modelling our predictions.
```
import warnings
warnings.filterwarnings("ignore")
import time
start = time.time()
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
from sklearn.metrics import confusion_matrix, precision_score
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler,LabelEncoder,OneHotEncoder
from sklearn.model_selection import cross_val_score,StratifiedKFold,RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix,precision_score,f1_score,recall_score
from sklearn.neural_network import MLPClassifier, MLPRegressor
plt.style.use('seaborn')
np.set_printoptions(precision=4)
data = pd.read_csv('./data_f1/data_filtered.csv')
data.head()
len(data)
dnf_by_driver = data.groupby('driver').sum()['driver_dnf']
driver_race_entered = data.groupby('driver').count()['driver_dnf']
driver_dnf_ratio = (dnf_by_driver/driver_race_entered)
driver_confidence = 1-driver_dnf_ratio
driver_confidence_dict = dict(zip(driver_confidence.index,driver_confidence))
driver_confidence_dict
dnf_by_constructor = data.groupby('constructor').sum()['constructor_dnf']
constructor_race_entered = data.groupby('constructor').count()['constructor_dnf']
constructor_dnf_ratio = (dnf_by_constructor/constructor_race_entered)
constructor_reliability = 1-constructor_dnf_ratio
constructor_reliability_dict = dict(zip(constructor_reliability.index,constructor_reliability))
constructor_reliability_dict
data['driver_confidence'] = data['driver'].apply(lambda x:driver_confidence_dict[x])
data['constructor_reliability'] = data['constructor'].apply(lambda x:constructor_reliability_dict[x])
#removing retired drivers and constructors
active_constructors = ['Alpine F1', 'Williams', 'McLaren', 'Ferrari', 'Mercedes',
'AlphaTauri', 'Aston Martin', 'Alfa Romeo', 'Red Bull',
'Haas F1 Team']
active_drivers = ['Daniel Ricciardo', 'Mick Schumacher', 'Carlos Sainz',
'Valtteri Bottas', 'Lance Stroll', 'George Russell',
'Lando Norris', 'Sebastian Vettel', 'Kimi Räikkönen',
'Charles Leclerc', 'Lewis Hamilton', 'Yuki Tsunoda',
'Max Verstappen', 'Pierre Gasly', 'Fernando Alonso',
'Sergio Pérez', 'Esteban Ocon', 'Antonio Giovinazzi',
'Nikita Mazepin','Nicholas Latifi']
data['active_driver'] = data['driver'].apply(lambda x: int(x in active_drivers))
data['active_constructor'] = data['constructor'].apply(lambda x: int(x in active_constructors))
data.head()
data.columns
```
## Directory to store Models
```
import os
if not os.path.exists('./models'):
os.mkdir('./models')
def position_index(x):
if x<4:
return 1
if x>10:
return 3
else :
return 2
```
## Model considering only Drivers
```
x_d= data[['GP_name','quali_pos','driver','age_at_gp_in_days','position','driver_confidence','active_driver']]
x_d = x_d[x_d['active_driver']==1]
sc = StandardScaler()
le = LabelEncoder()
x_d['GP_name'] = le.fit_transform(x_d['GP_name'])
x_d['driver'] = le.fit_transform(x_d['driver'])
x_d['GP_name'] = le.fit_transform(x_d['GP_name'])
x_d['age_at_gp_in_days'] = sc.fit_transform(x_d[['age_at_gp_in_days']])
X_d = x_d.drop(['position','active_driver'],1)
y_d = x_d['position'].apply(lambda x: position_index(x))
#cross validation for different models
models = [LogisticRegression(),DecisionTreeClassifier(),RandomForestClassifier(),SVC(),GaussianNB(),KNeighborsClassifier()]
names = ['LogisticRegression','DecisionTreeClassifier','RandomForestClassifier','SVC','GaussianNB','KNeighborsClassifier']
model_dict = dict(zip(models,names))
mean_results_dri = []
results_dri = []
name = []
for model in models:
cv = StratifiedKFold(n_splits=10,random_state=1,shuffle=True)
result = cross_val_score(model,X_d,y_d,cv=cv,scoring='accuracy')
mean_results_dri.append(result.mean())
results_dri.append(result)
name.append(model_dict[model])
print(f'{model_dict[model]} : {result.mean()}')
plt.figure(figsize=(15,10))
plt.boxplot(x=results_dri,labels=name)
plt.xlabel('Models')
plt.ylabel('Accuracy')
plt.title('Model performance comparison (drivers only)')
plt.show()
```
## Model considering only Constructors
```
x_c = data[['GP_name','quali_pos','constructor','position','constructor_reliability','active_constructor']]
x_c = x_c[x_c['active_constructor']==1]
sc = StandardScaler()
le = LabelEncoder()
x_c['GP_name'] = le.fit_transform(x_c['GP_name'])
x_c['constructor'] = le.fit_transform(x_c['constructor'])
X_c = x_c.drop(['position','active_constructor'],1)
y_c = x_c['position'].apply(lambda x: position_index(x))
#cross validation for different models
models = [LogisticRegression(),DecisionTreeClassifier(),RandomForestClassifier(),SVC(),GaussianNB(),KNeighborsClassifier()]
names = ['LogisticRegression','DecisionTreeClassifier','RandomForestClassifier','SVC','GaussianNB','KNeighborsClassifier']
model_dict = dict(zip(models,names))
mean_results_const = []
results_const = []
name = []
for model in models:
cv = StratifiedKFold(n_splits=10,random_state=1,shuffle=True)
result = cross_val_score(model,X_c,y_c,cv=cv,scoring='accuracy')
mean_results_const.append(result.mean())
results_const.append(result)
name.append(model_dict[model])
print(f'{model_dict[model]} : {result.mean()}')
plt.figure(figsize=(15,10))
plt.boxplot(x=results_const,labels=name)
plt.xlabel('Models')
plt.ylabel('Accuracy')
plt.title('Model performance comparison (Teams only)')
plt.show()
```
# Model considering both Drivers and Constructors
```
cleaned_data = data[['GP_name','quali_pos','constructor','driver','position','driver_confidence','constructor_reliability','active_driver','active_constructor']]
cleaned_data = cleaned_data[(cleaned_data['active_driver']==1)&(cleaned_data['active_constructor']==1)]
cleaned_data.to_csv('./data_f1/cleaned_data.csv',index=False)
```
### Build your X dataset with the following columns:
- GP_name
- quali_pos to predict the classification cluster (1,2,3)
- constructor
- driver
- position
- driver confidence
- constructor_reliability
- active_driver
- active_constructor
### Filter the dataset for this "Driver + Constructor" model to keep only active drivers and constructors
### Create Standard Scaler and Label Encoder for the different features in order to have a similar scale for all features
### Prepare the X (Features dataset) and y for predicted value.
In our case, we want to calculate the final-position cluster for each driver using the "position_index" function (one possible implementation is sketched after the empty cell below)
```
# Implement X, y
```
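One possible implementation, mirroring the drivers-only and constructors-only cells above. `cleaned_data` is already filtered to active drivers and constructors, and this feature set has no continuous column such as `age_at_gp_in_days`, so only label encoding is needed here; treat it as a sketch rather than the only valid answer.
```
x_dc = cleaned_data.copy()

le = LabelEncoder()
x_dc['GP_name'] = le.fit_transform(x_dc['GP_name'])
x_dc['driver'] = le.fit_transform(x_dc['driver'])
x_dc['constructor'] = le.fit_transform(x_dc['constructor'])

# Features: GP, qualifying position, encoded driver/constructor plus the confidence/reliability scores
X = x_dc.drop(['position', 'active_driver', 'active_constructor'], 1)
# Target: the 1/2/3 cluster of the final position
y = x_dc['position'].apply(lambda x: position_index(x))
```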
### Apply the same list of ML algorithms for cross-validation of the different models
Store the mean accuracy values in order to compare them with the previous ML models (one possible completion is sketched after the cell below).
```
mean_results = []
results = []
name = []
# cross validation for different models
```
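One possible completion, following the same pattern as the two previous comparisons and assuming `X` and `y` from the sketch above; the `mean_results`, `results` and `name` lists were initialised in the cell just before.
```
models = [LogisticRegression(), DecisionTreeClassifier(), RandomForestClassifier(), SVC(), GaussianNB(), KNeighborsClassifier()]
names = ['LogisticRegression', 'DecisionTreeClassifier', 'RandomForestClassifier', 'SVC', 'GaussianNB', 'KNeighborsClassifier']
model_dict = dict(zip(models, names))
for model in models:
    cv = StratifiedKFold(n_splits=10, random_state=1, shuffle=True)
    result = cross_val_score(model, X, y, cv=cv, scoring='accuracy')
    mean_results.append(result.mean())
    results.append(result)
    name.append(model_dict[model])
    print(f'{model_dict[model]} : {result.mean()}')
```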
### Use the same boxplot plotter used in the previous Models
```
# Implement boxplot
```
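A possible completion, reusing the boxplot code from the two previous comparisons:
```
plt.figure(figsize=(15, 10))
plt.boxplot(x=results, labels=name)
plt.xlabel('Models')
plt.ylabel('Accuracy')
plt.title('Model performance comparison (drivers + constructors)')
plt.show()
```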
# Comparing The 3 ML Models
Let's compare the mean scores of our three modelling approaches.
```
lr = [mean_results[0],mean_results_dri[0],mean_results_const[0]]
dtc = [mean_results[1],mean_results_dri[1],mean_results_const[1]]
rfc = [mean_results[2],mean_results_dri[2],mean_results_const[2]]
svc = [mean_results[3],mean_results_dri[3],mean_results_const[3]]
gnb = [mean_results[4],mean_results_dri[4],mean_results_const[4]]
knn = [mean_results[5],mean_results_dri[5],mean_results_const[5]]
font1 = {
'family':'serif',
'color':'black',
'weight':'normal',
'size':18
}
font2 = {
'family':'serif',
'color':'black',
'weight':'bold',
'size':12
}
x_ax = np.arange(3)
plt.figure(figsize=(30,15))
bar1 = plt.bar(x_ax,lr,width=0.1,align='center', label="Logistic Regression")
bar2 = plt.bar(x_ax+0.1,dtc,width=0.1,align='center', label="DecisionTree")
bar3 = plt.bar(x_ax+0.2,rfc,width=0.1,align='center', label="RandomForest")
bar4 = plt.bar(x_ax+0.3,svc,width=0.1,align='center', label="SVC")
bar5 = plt.bar(x_ax+0.4,gnb,width=0.1,align='center', label="GaussianNB")
bar6 = plt.bar(x_ax+0.5,knn,width=0.1,align='center', label="KNN")
plt.text(0.05,1,'CV score for combined data',fontdict=font1)
plt.text(1.04,1,'CV score only driver data',fontdict=font1)
plt.text(2,1,'CV score only team data',fontdict=font1)
for bar in bar1.patches:
yval = bar.get_height()
plt.text(bar.get_x()+0.01,yval+0.01,f'{round(yval*100,2)}%',fontdict=font2)
for bar in bar2.patches:
yval = bar.get_height()
plt.text(bar.get_x()+0.01,yval+0.01,f'{round(yval*100,2)}%',fontdict=font2)
for bar in bar3.patches:
yval = bar.get_height()
plt.text(bar.get_x()+0.01,yval+0.01,f'{round(yval*100,2)}%',fontdict=font2)
for bar in bar4.patches:
yval = bar.get_height()
plt.text(bar.get_x()+0.01,yval+0.01,f'{round(yval*100,2)}%',fontdict=font2)
for bar in bar5.patches:
yval = bar.get_height()
plt.text(bar.get_x()+0.01,yval+0.01,f'{round(yval*100,2)}%',fontdict=font2)
for bar in bar6.patches:
yval = bar.get_height()
plt.text(bar.get_x()+0.01,yval+0.01,f'{round(yval*100,2)}%',fontdict=font2)
plt.legend(loc='center', bbox_to_anchor=(0.5, -0.10), shadow=False, ncol=6)
plt.show()
end = time.time()
import datetime
str(datetime.timedelta(seconds=(end - start)))
print(str(end - start)+" seconds")
```
| github_jupyter |
* Compare the performance of different portfolio optimizers on problems of different sizes;
* The results below mainly compare ``alphamind`` against other optimizers available in ``python``; we use the optimizers in ``cvxopt`` whenever possible and fall back to ``scipy`` otherwise;
* Because ``scipy`` performs very poorly on ``ashare_ex``, its results on that stock universe are generally ignored;
* All times are reported in milliseconds.
* Please set the environment variable `DB_URI` to point to the database.
```
import os
import timeit
import numpy as np
import pandas as pd
import cvxpy
from alphamind.api import *
from alphamind.portfolio.linearbuilder import linear_builder
from alphamind.portfolio.meanvariancebuilder import mean_variance_builder
from alphamind.portfolio.meanvariancebuilder import target_vol_builder
pd.options.display.float_format = '{:,.2f}'.format
```
## 0. Data Preparation
------------------
```
ref_date = '2018-02-08'
u_names = ['sh50', 'hs300', 'zz500', 'zz800', 'zz1000', 'ashare_ex']
b_codes = [16, 300, 905, 906, 852, None]
risk_model = 'short'
factor = 'EPS'
lb = 0.0
ub = 0.1
data_source = os.environ['DB_URI']
engine = SqlEngine(data_source)
universes = [Universe(u_name) for u_name in u_names]
codes_set = [engine.fetch_codes(ref_date, universe=universe) for universe in universes]
data_set = [engine.fetch_data(ref_date, factor, codes, benchmark=b_code, risk_model=risk_model) for codes, b_code in zip(codes_set, b_codes)]
```
## 1. Linear Optimization (with linear constraints)
---------------------------------
```
df = pd.DataFrame(columns=u_names, index=['cvxpy', 'alphamind'])
number = 1
for u_name, sample_data in zip(u_names, data_set):
factor_data = sample_data['factor']
er = factor_data[factor].values
n = len(er)
lbound = np.ones(n) * lb
ubound = np.ones(n) * ub
risk_constraints = np.ones((n, 1))
risk_target = (np.array([1.]), np.array([1.]))
status, y, x1 = linear_builder(er, lbound, ubound, risk_constraints, risk_target)
elasped_time1 = timeit.timeit("linear_builder(er, lbound, ubound, risk_constraints, risk_target)", number=number, globals=globals()) / number * 1000
A_eq = risk_constraints.T
b_eq = np.array([1.])
w = cvxpy.Variable(n)
curr_risk_exposure = w * risk_constraints
constraints = [w >= lbound,
w <= ubound,
curr_risk_exposure == risk_target[0]]
objective = cvxpy.Minimize(-w.T * er)
prob = cvxpy.Problem(objective, constraints)
prob.solve(solver='ECOS')
elasped_time2 = timeit.timeit("prob.solve(solver='ECOS')",
number=number, globals=globals()) / number * 1000
np.testing.assert_almost_equal(x1 @ er, np.array(w.value).flatten() @ er, 4)
df.loc['alphamind', u_name] = elasped_time1
df.loc['cvxpy', u_name] = elasped_time2
alpha_logger.info(f"{u_name} is finished")
df
prob.value
```
## 2. Linear Optimization (with an L1 constraint)
-----------------------
```
from cvxpy import pnorm
df = pd.DataFrame(columns=u_names, index=['cvxpy', 'alphamind (clp simplex)', 'alphamind (clp interior)', 'alphamind (ecos)'])
turn_over_target = 0.5
number = 1
for u_name, sample_data in zip(u_names, data_set):
factor_data = sample_data['factor']
er = factor_data[factor].values
n = len(er)
lbound = np.ones(n) * lb
ubound = np.ones(n) * ub
if 'weight' in factor_data:
current_position = factor_data.weight.values
else:
current_position = np.ones_like(er) / len(er)
risk_constraints = np.ones((len(er), 1))
risk_target = (np.array([1.]), np.array([1.]))
status, y, x1 = linear_builder(er,
lbound,
ubound,
risk_constraints,
risk_target,
turn_over_target=turn_over_target,
current_position=current_position,
method='interior')
elasped_time1 = timeit.timeit("""linear_builder(er,
lbound,
ubound,
risk_constraints,
risk_target,
turn_over_target=turn_over_target,
current_position=current_position,
method='interior')""", number=number, globals=globals()) / number * 1000
w = cvxpy.Variable(n)
curr_risk_exposure = risk_constraints.T @ w
constraints = [w >= lbound,
w <= ubound,
curr_risk_exposure == risk_target[0],
pnorm(w - current_position, 1) <= turn_over_target]
objective = cvxpy.Minimize(-w.T * er)
prob = cvxpy.Problem(objective, constraints)
prob.solve(solver='ECOS')
elasped_time2 = timeit.timeit("prob.solve(solver='ECOS')",
number=number, globals=globals()) / number * 1000
status, y, x2 = linear_builder(er,
lbound,
ubound,
risk_constraints,
risk_target,
turn_over_target=turn_over_target,
current_position=current_position,
method='simplex')
elasped_time3 = timeit.timeit("""linear_builder(er,
lbound,
ubound,
risk_constraints,
risk_target,
turn_over_target=turn_over_target,
current_position=current_position,
method='simplex')""", number=number, globals=globals()) / number * 1000
status, y, x3 = linear_builder(er,
lbound,
ubound,
risk_constraints,
risk_target,
turn_over_target=turn_over_target,
current_position=current_position,
method='ecos')
elasped_time4 = timeit.timeit("""linear_builder(er,
lbound,
ubound,
risk_constraints,
risk_target,
turn_over_target=turn_over_target,
current_position=current_position,
method='ecos')""", number=number, globals=globals()) / number * 1000
np.testing.assert_almost_equal(x1 @ er, np.array(w.value).flatten() @ er, 4)
np.testing.assert_almost_equal(x2 @ er, np.array(w.value).flatten() @ er, 4)
np.testing.assert_almost_equal(x3 @ er, np.array(w.value).flatten() @ er, 4)
df.loc['alphamind (clp interior)', u_name] = elasped_time1
df.loc['alphamind (clp simplex)', u_name] = elasped_time3
df.loc['alphamind (ecos)', u_name] = elasped_time4
df.loc['cvxpy', u_name] = elasped_time2
alpha_logger.info(f"{u_name} is finished")
df
```
## 3. Mean-Variance Optimization (unconstrained)
-----------------------
```
from cvxpy import *
df = pd.DataFrame(columns=u_names, index=['cvxpy', 'alphamind'])
number = 1
for u_name, sample_data in zip(u_names, data_set):
all_styles = risk_styles + industry_styles + ['COUNTRY']
factor_data = sample_data['factor']
risk_cov = sample_data['risk_cov'][all_styles].values
risk_exposure = factor_data[all_styles].values
special_risk = factor_data.srisk.values
sec_cov = risk_exposure @ risk_cov @ risk_exposure.T / 10000 + np.diag(special_risk ** 2) / 10000
er = factor_data[factor].values
n = len(er)
bm = np.zeros(n)
lbound = -np.ones(n) * np.inf
ubound = np.ones(n) * np.inf
risk_model = dict(cov=None, factor_cov=risk_cov/10000., factor_loading=risk_exposure, idsync=(special_risk**2)/10000.)
status, y, x1 = mean_variance_builder(er,
risk_model,
bm,
lbound,
ubound,
None,
None,
lam=1)
elasped_time1 = timeit.timeit("""mean_variance_builder(er,
risk_model,
bm,
lbound,
ubound,
None,
None,
lam=1)""",
number=number, globals=globals()) / number * 1000
w = cvxpy.Variable(n)
risk = sum_squares(multiply(special_risk / 100., w)) + quad_form((w.T * risk_exposure).T, risk_cov / 10000.)
objective = cvxpy.Minimize(-w.T * er + 0.5 * risk)
prob = cvxpy.Problem(objective)
prob.solve(solver='ECOS')
elasped_time2 = timeit.timeit("prob.solve(solver='ECOS')",
number=number, globals=globals()) / number * 1000
u1 = -x1 @ er + 0.5 * x1 @ sec_cov @ x1
x2 = np.array(w.value).flatten()
u2 = -x2 @ er + 0.5 * x2 @ sec_cov @ x2
np.testing.assert_array_almost_equal(u1, u2, 4)
df.loc['alphamind', u_name] = elasped_time1
df.loc['cvxpy', u_name] = elasped_time2
alpha_logger.info(f"{u_name} is finished")
df
```
## 4. Mean-Variance Optimization (box constraints)
---------------
```
df = pd.DataFrame(columns=u_names, index=['cvxpy', 'alphamind'])
number = 1
for u_name, sample_data in zip(u_names, data_set):
all_styles = risk_styles + industry_styles + ['COUNTRY']
factor_data = sample_data['factor']
risk_cov = sample_data['risk_cov'][all_styles].values
risk_exposure = factor_data[all_styles].values
special_risk = factor_data.srisk.values
sec_cov = risk_exposure @ risk_cov @ risk_exposure.T / 10000 + np.diag(special_risk ** 2) / 10000
er = factor_data[factor].values
n = len(er)
bm = np.zeros(n)
lbound = np.zeros(n)
ubound = np.ones(n) * 0.1
risk_model = dict(cov=None, factor_cov=risk_cov/10000., factor_loading=risk_exposure, idsync=(special_risk**2)/10000.)
status, y, x1 = mean_variance_builder(er,
risk_model,
bm,
lbound,
ubound,
None,
None)
elasped_time1 = timeit.timeit("""mean_variance_builder(er,
risk_model,
bm,
lbound,
ubound,
None,
None)""",
number=number, globals=globals()) / number * 1000
w = cvxpy.Variable(n)
risk = sum_squares(multiply(special_risk / 100., w)) + quad_form((w.T * risk_exposure).T, risk_cov / 10000.)
objective = cvxpy.Minimize(-w.T * er + 0.5 * risk)
constraints = [w >= lbound,
w <= ubound]
prob = cvxpy.Problem(objective, constraints)
prob.solve(solver='ECOS')
elasped_time2 = timeit.timeit("prob.solve(solver='ECOS')",
number=number, globals=globals()) / number * 1000
u1 = -x1 @ er + 0.5 * x1 @ sec_cov @ x1
x2 = np.array(w.value).flatten()
u2 = -x2 @ er + 0.5 * x2 @ sec_cov @ x2
np.testing.assert_array_almost_equal(u1, u2, 4)
df.loc['alphamind', u_name] = elasped_time1
df.loc['cvxpy', u_name] = elasped_time2
alpha_logger.info(f"{u_name} is finished")
df
```
## 5. Mean-Variance Optimization (box and linear constraints)
----------------
```
df = pd.DataFrame(columns=u_names, index=['cvxpy', 'alphamind'])
number = 1
for u_name, sample_data in zip(u_names, data_set):
all_styles = risk_styles + industry_styles + ['COUNTRY']
factor_data = sample_data['factor']
risk_cov = sample_data['risk_cov'][all_styles].values
risk_exposure = factor_data[all_styles].values
special_risk = factor_data.srisk.values
sec_cov = risk_exposure @ risk_cov @ risk_exposure.T / 10000 + np.diag(special_risk ** 2) / 10000
er = factor_data[factor].values
n = len(er)
bm = np.zeros(n)
lbound = np.zeros(n)
ubound = np.ones(n) * 0.1
risk_constraints = np.ones((len(er), 1))
risk_target = (np.array([1.]), np.array([1.]))
risk_model = dict(cov=None, factor_cov=risk_cov/10000., factor_loading=risk_exposure, idsync=(special_risk**2)/10000.)
status, y, x1 = mean_variance_builder(er,
risk_model,
bm,
lbound,
ubound,
risk_constraints,
risk_target)
elasped_time1 = timeit.timeit("""mean_variance_builder(er,
risk_model,
bm,
lbound,
ubound,
risk_constraints,
risk_target)""",
number=number, globals=globals()) / number * 1000
w = cvxpy.Variable(n)
risk = sum_squares(multiply(special_risk / 100., w)) + quad_form((w.T * risk_exposure).T, risk_cov / 10000.)
objective = cvxpy.Minimize(-w.T * er + 0.5 * risk)
curr_risk_exposure = risk_constraints.T @ w
constraints = [w >= lbound,
w <= ubound,
curr_risk_exposure == risk_target[0]]
prob = cvxpy.Problem(objective, constraints)
prob.solve(solver='ECOS')
elasped_time2 = timeit.timeit("prob.solve(solver='ECOS')",
number=number, globals=globals()) / number * 1000
u1 = -x1 @ er + 0.5 * x1 @ sec_cov @ x1
x2 = np.array(w.value).flatten()
u2 = -x2 @ er + 0.5 * x2 @ sec_cov @ x2
np.testing.assert_array_almost_equal(u1, u2, 4)
df.loc['alphamind', u_name] = elasped_time1
df.loc['cvxpy', u_name] = elasped_time2
alpha_logger.info(f"{u_name} is finished")
df
```
## 6. Linear Optimization (with a Quadratic Constraint)
-------------------------
```
df = pd.DataFrame(columns=u_names, index=['cvxpy', 'alphamind'])
number = 1
target_vol = 0.5
for u_name, sample_data in zip(u_names, data_set):
all_styles = risk_styles + industry_styles + ['COUNTRY']
factor_data = sample_data['factor']
risk_cov = sample_data['risk_cov'][all_styles].values
risk_exposure = factor_data[all_styles].values
special_risk = factor_data.srisk.values
sec_cov = risk_exposure @ risk_cov @ risk_exposure.T / 10000 + np.diag(special_risk ** 2) / 10000
er = factor_data[factor].values
n = len(er)
if 'weight' in factor_data:
bm = factor_data.weight.values
else:
bm = np.ones_like(er) / n
lbound = np.zeros(n)
ubound = np.ones(n) * 0.1
risk_constraints = np.ones((n, 1))
risk_target = (np.array([bm.sum()]), np.array([bm.sum()]))
risk_model = dict(cov=None, factor_cov=risk_cov/10000., factor_loading=risk_exposure, idsync=(special_risk**2)/10000.)
status, y, x1 = target_vol_builder(er,
risk_model,
bm,
lbound,
ubound,
risk_constraints,
risk_target,
vol_target=target_vol)
elasped_time1 = timeit.timeit("""target_vol_builder(er,
risk_model,
bm,
lbound,
ubound,
risk_constraints,
risk_target,
vol_target=target_vol)""",
number=number, globals=globals()) / number * 1000
w = cvxpy.Variable(n)
risk = sum_squares(multiply(special_risk / 100., w)) + quad_form((w.T * risk_exposure).T, risk_cov / 10000.)
objective = cvxpy.Minimize(-w.T * er)
curr_risk_exposure = risk_constraints.T @ w
constraints = [w >= lbound,
w <= ubound,
curr_risk_exposure == risk_target[0],
risk <= target_vol * target_vol]
prob = cvxpy.Problem(objective, constraints)
prob.solve(solver='ECOS')
elasped_time2 = timeit.timeit("prob.solve(solver='ECOS')",
number=number, globals=globals()) / number * 1000
u1 = -x1 @ er
x2 = np.array(w.value).flatten()
u2 = -x2 @ er
np.testing.assert_array_almost_equal(u1, u2, 4)
df.loc['alphamind', u_name] = elasped_time1
df.loc['cvxpy', u_name] = elasped_time2
alpha_logger.info(f"{u_name} is finished")
df
```
| github_jupyter |
# Day and Night Image Classifier
---
The day/night image dataset consists of 200 RGB color images in two categories: day and night. There are equal numbers of each example: 100 day images and 100 night images.
We'd like to build a classifier that can accurately label these images as day or night, and that relies on finding distinguishing features between the two types of images!
*Note: All images come from the [AMOS dataset](http://cs.uky.edu/~jacobs/datasets/amos/) (Archive of Many Outdoor Scenes).*
### Import resources
Before you get started on the project code, import the libraries and resources that you'll need.
```
import cv2 # computer vision library
import helpers
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
```
## Training and Testing Data
The 200 day/night images are separated into training and testing datasets.
* 60% of these images are training images, for you to use as you create a classifier.
* 40% are test images, which will be used to test the accuracy of your classifier.
First, we set some variables to keep track of where our images are stored:
image_dir_training: the directory where our training image data is stored
image_dir_test: the directory where our test image data is stored
```
# Image data directories
image_dir_training = "day_night_images/training/"
image_dir_test = "day_night_images/test/"
```
## Load the datasets
These first few lines of code will load the training day/night images and store all of them in a variable, `IMAGE_LIST`. This list contains the images and their associated label ("day" or "night").
For example, the first image-label pair in `IMAGE_LIST` can be accessed by index:
`IMAGE_LIST[0][:]`.
```
# Using the load_dataset function in helpers.py
# Load training data
IMAGE_LIST = helpers.load_dataset(image_dir_training)
```
## Construct a `STANDARDIZED_LIST` of input images and output labels.
This function takes in a list of image-label pairs and outputs a **standardized** list of resized images and numerical labels.
```
# Standardize all training images
STANDARDIZED_LIST = helpers.standardize(IMAGE_LIST)
```
## Visualize the standardized data
Display a standardized image from STANDARDIZED_LIST.
```
# Display a standardized image and its label
# Select an image by index
image_num = 0
selected_image = STANDARDIZED_LIST[image_num][0]
selected_label = STANDARDIZED_LIST[image_num][1]
# Display image and data about it
plt.imshow(selected_image)
print("Shape: "+str(selected_image.shape))
print("Label [1 = day, 0 = night]: " + str(selected_label))
```
# Feature Extraction
Create a feature that represents the brightness in an image. We'll be extracting the **average brightness** using HSV colorspace. Specifically, we'll use the V channel (a measure of brightness), add up the pixel values in the V channel, then divide that sum by the area of the image to get the average Value of the image.
## RGB to HSV conversion
Below, a test image is converted from RGB to HSV colorspace and each component is displayed in an image.
```
# Convert an image to HSV colorspace
# Visualize the individual color channels
image_num = 0
test_im = STANDARDIZED_LIST[image_num][0]
test_label = STANDARDIZED_LIST[image_num][1]
# Convert to HSV
hsv = cv2.cvtColor(test_im, cv2.COLOR_RGB2HSV)
# Print image label
print('Label: ' + str(test_label))
# HSV channels
h = hsv[:,:,0]
s = hsv[:,:,1]
v = hsv[:,:,2]
# Plot the original image and the three channels
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(20,10))
ax1.set_title('Standardized image')
ax1.imshow(test_im)
ax2.set_title('H channel')
ax2.imshow(h, cmap='gray')
ax3.set_title('S channel')
ax3.imshow(s, cmap='gray')
ax4.set_title('V channel')
ax4.imshow(v, cmap='gray')
```
---
### Find the average brightness using the V channel
This function takes in a **standardized** RGB image and returns a feature (a single value) that represents the average level of brightness in the image. We'll use this value to classify the image as day or night.
```
# Find the average Value or brightness of an image
def avg_brightness(rgb_image):
# Convert image to HSV
hsv = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2HSV)
# Add up all the pixel values in the V channel
sum_brightness = np.sum(hsv[:,:,2])
## TODO: Calculate the average brightness using the area of the image
# and the sum calculated above
avg = 0
avg = sum_brightness/rgb_image.shape[0]/rgb_image.shape[1]
return avg
# Testing average brightness levels
# Look at a number of different day and night images and think about
# what average brightness value separates the two types of images
# As an example, a "night" image is loaded in and its avg brightness is displayed
image_num = 190
test_im = STANDARDIZED_LIST[image_num][0]
avg = avg_brightness(test_im)
print('Avg brightness: ' + str(avg))
plt.imshow(test_im)
```
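To close the loop on the classification goal stated above, here is a minimal sketch of how the `avg_brightness` feature could drive a day/night classifier. The threshold value of 100 is an illustrative assumption, not a value from this notebook; in practice it would be tuned by inspecting brightness values on the training images.
```
# Hypothetical threshold classifier built on avg_brightness (the threshold is an assumption)
def estimate_label(rgb_image, threshold=100):
    # 1 = day, 0 = night
    return 1 if avg_brightness(rgb_image) > threshold else 0

# Quick sanity check on the standardized training images
correct = sum(estimate_label(im) == label for im, label in STANDARDIZED_LIST)
print("Training accuracy:", correct / len(STANDARDIZED_LIST))
```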
| github_jupyter |
<a href="https://colab.research.google.com/github/Abhishekauti21/dsmp-pre-work/blob/master/practice_project.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
class test:
def __init__(self,a):
self.a=a
def display(self):
print(self.a)
obj=test()
obj.display()
def f1():
x=100
print(x)
x=+1
f1()
area = { 'living' : [400, 450], 'living' : [650, 800], 'kitchen' : [300, 250], 'garage' : [250, 0]}
print (area['living'])
List_1=[2,6,7,8]
List_2=[2,6,7,8]
print(List_1[-2] + List_2[2])
d = {0: 'a', 1: 'b', 2: 'c'}
for x, y in d.items():
print(x, y)
Numbers=[10,5,7,8,9,5]
print(max(Numbers)-min(Numbers))
fo = open("foo.txt", "read+")
print("Name of the file: ", fo.name)
# Assuming file has following 5 lines
# This is 1st line
# This is 2nd line
# This is 3rd line
# This is 4th line
# This is 5th line
for index in range(5):
line = fo.readline()
print("Line No {} - {}".format(index, line))
#Close opened file
fo.close()
x = "abcdef"
while i in x:
print(i, end=" ")
def cube(x):
return x * x * x
x = cube(3)
print (x)
print(((True) or (False) and (False) or (False)))
x1=int('16')
x2=8 + 8
x3= (4**2)
print(x1 is x2 is x3)
Word='warrior knights'
A=Word[9:14]
B=Word[-13:-16:-1]
B+A
def to_upper(k):
return k.upper()
x = ['ab', 'cd']
print(list(map(to_upper, x)))
my_string = "hello world"
k = [(i.upper(), len(i)) for i in my_string]
print(k)
from csv import reader
def explore_data(dataset, start, end, rows_and_columns=False):
"""Explore the elements of a list.
    Print the elements of a list starting from the index 'start' (included) up to the index 'end' (excluded).
Keyword arguments:
dataset -- list of which we want to see the elements
start -- index of the first element we want to see, this is included
end -- index of the stopping element, this is excluded
    rows_and_columns -- this parameter is optional while calling the function. It takes binary values, either True or False. If True, print the dimensions of the list; otherwise don't.
"""
dataset_slice = dataset[start:end]
for row in dataset_slice:
print(row)
print('\n') # adds a new (empty) line between rows
if rows_and_columns:
print('Number of rows:', len(dataset))
print('Number of columns:', len(dataset[0]))
def duplicate_and_unique_movies(dataset, index_):
"""Check the duplicate and unique entries.
    We have a nested list. This function checks whether each row in the list is unique or duplicated, based on the element at index 'index_'.
    It prints the number of duplicate entries, along with some examples of duplicated entries.
Keyword arguments:
dataset -- two dimensional list which we want to explore
    index_ -- column index at which the element in each row would be checked for duplication
"""
duplicate = []
unique = []
for movie in dataset:
name = movie[index_]
if name in unique:
duplicate.append(name)
else:
unique.append(name)
print('Number of duplicate Movies:', len(duplicate))
print('\n')
print('Examples of duplicate Movies:', duplicate[:15])
def movies_lang(dataset, index_, lang_):
"""Extract the movies of a particular language.
    Of all the movies available in all languages, this function extracts all the movies in a particular language.
    Once you have extracted the movies, call explore_data() to print the first few rows.
Keyword arguments:
dataset -- list containing the details of the movie
    index_ -- index which is to be compared for languages
lang_ -- desired language for which we want to filter out the movies
Returns:
movies_ -- list with details of the movies in selected language
"""
movies_ = []
    for movie in dataset:
lang = movie[index_]
if lang == lang_:
movies_.append(movie)
print("Examples of Movies in English Language:")
explore_data(movies_, 0, 3, True)
return movies_
def rate_bucket(dataset, rate_low, rate_high):
"""Extract the movies within the specified ratings.
This function extracts all the movies that has rating between rate_low and high_rate.
    Once you have extracted the movies, call explore_data() to print the first few rows.
Keyword arguments:
dataset -- list containing the details of the movie
rate_low -- lower range of rating
rate_high -- higher range of rating
Returns:
rated_movies -- list of the details of the movies with required ratings
"""
rated_movies = []
for movie in dataset:
vote_avg = float(movie[-4])
if ((vote_avg >= rate_low) & (vote_avg <= rate_high)):
rated_movies.append(movie)
print("Examples of Movies in required rating bucket:")
explore_data(rated_movies, 0, 3, True)
return rated_movies
# Read the data file and store it as a list 'movies'
opened_file = open(path, encoding="utf8")
read_file = reader(opened_file)
movies = list(read_file)
# The first row is header. Extract and store it in 'movies_header'.
movies_header = movies[0]
print("Movies Header:\n", movies_header)
# Subset the movies dataset such that the header is removed from the list and store it back in movies
movies = movies[1:]
# Delete wrong data
# Explore the row #4553. You will see that, apart from the id, description, status and title, no other information is available.
# Hence drop this row.
print("Entry at index 4553:")
explore_data(movies, 4553, 4554)
del movies[4553]
# Using explore_data() with appropriate parameters, view the details of the first 5 movies.
print("First 5 Entries:")
explore_data(movies, 0, 5, True)
# Our dataset might have more than one entry for a movie. Call duplicate_and_unique_movies() with index of the name to check the same.
duplicate_and_unique_movies(movies, 13)
# We saw that there are 3 movies for which there are multiple entries.
# Create a dictionary, 'reviews_max' that will have the name of the movie as key, and the maximum number of reviews as values.
reviews_max = {}
for movie in movies:
name = movie[13]
n_reviews = float(movie[12])
if name in reviews_max and reviews_max[name] < n_reviews:
reviews_max[name] = n_reviews
elif name not in reviews_max:
reviews_max[name] = n_reviews
len(reviews_max)
# Create a list 'movies_clean', which will filter out the duplicate movies and contain the rows with the maximum number of reviews for duplicate movies, as stored in 'reviews_max'.
movies_clean = []
already_added = []
for movie in movies:
name = movie[13]
n_reviews = float(movie[12])
if (reviews_max[name] == n_reviews) and (name not in already_added):
movies_clean.append(movie)
already_added.append(name)
len(movies_clean)
# Calling movies_lang(), extract all the english movies and store it in movies_en.
movies_en = movies_lang(movies_clean, 3, 'en')
# Call the rate_bucket function to see the movies with rating higher than 8.
high_rated_movies = rate_bucket(movies_en, 8, 10)
```
| github_jupyter |
# Detecting COVID-19 with Chest X Ray using PyTorch
Image classification of Chest X Rays in one of three classes: Normal, Viral Pneumonia, COVID-19
Dataset from [COVID-19 Radiography Dataset](https://www.kaggle.com/tawsifurrahman/covid19-radiography-database) on Kaggle
# Importing Libraries
```
from google.colab import drive
drive.mount('/gdrive')
%matplotlib inline
import os
import shutil
import copy
import random
import torch
import torch.nn as nn
import torchvision
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import seaborn as sns
import time
from sklearn.metrics import confusion_matrix
from PIL import Image
import matplotlib.pyplot as plt
torch.manual_seed(0)
print('Using PyTorch version', torch.__version__)
```
# Preparing Training and Test Sets
```
class_names = ['Non-Covid', 'Covid']
root_dir = '/gdrive/My Drive/Research_Documents_completed/Data/Data/'
source_dirs = ['non', 'covid']
```
# Creating Custom Dataset
```
class ChestXRayDataset(torch.utils.data.Dataset):
def __init__(self, image_dirs, transform):
def get_images(class_name):
images = [x for x in os.listdir(image_dirs[class_name]) if x.lower().endswith('png') or x.lower().endswith('jpg')]
print(f'Found {len(images)} {class_name} examples')
return images
self.images = {}
self.class_names = ['Non-Covid', 'Covid']
for class_name in self.class_names:
self.images[class_name] = get_images(class_name)
self.image_dirs = image_dirs
self.transform = transform
def __len__(self):
return sum([len(self.images[class_name]) for class_name in self.class_names])
def __getitem__(self, index):
class_name = random.choice(self.class_names)
index = index % len(self.images[class_name])
image_name = self.images[class_name][index]
image_path = os.path.join(self.image_dirs[class_name], image_name)
image = Image.open(image_path).convert('RGB')
return self.transform(image), self.class_names.index(class_name)
```
# Image Transformations
```
train_transform = torchvision.transforms.Compose([
torchvision.transforms.Resize(size=(224, 224)),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
test_transform = torchvision.transforms.Compose([
torchvision.transforms.Resize(size=(224, 224)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
```
# Prepare DataLoader
```
train_dirs = {
'Non-Covid': '/gdrive/My Drive/Research_Documents_completed/Data/Data/non/',
'Covid': '/gdrive/My Drive/Research_Documents_completed/Data/Data/covid/'
}
#train_dirs = {
# 'Non-Covid': '/gdrive/My Drive/Data/Data/non/',
# 'Covid': '/gdrive/My Drive/Data/Data/covid/'
#}
train_dataset = ChestXRayDataset(train_dirs, train_transform)
test_dirs = {
'Non-Covid': '/gdrive/My Drive/Research_Documents_completed/Data/Data/test/non/',
'Covid': '/gdrive/My Drive/Research_Documents_completed/Data/Data/test/covid/'
}
test_dataset = ChestXRayDataset(test_dirs, test_transform)
batch_size = 25
dl_train = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
dl_test = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=True)
print(dl_train)
print('Number of training batches', len(dl_train))
print('Number of test batches', len(dl_test))
```
# Data Visualization
```
class_names = train_dataset.class_names
def show_images(images, labels, preds):
plt.figure(figsize=(30, 20))
for i, image in enumerate(images):
plt.subplot(1, 25, i + 1, xticks=[], yticks=[])
image = image.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = image * std + mean
image = np.clip(image, 0., 1.)
plt.imshow(image)
col = 'green'
if preds[i] != labels[i]:
col = 'red'
plt.xlabel(f'{class_names[int(labels[i].numpy())]}')
plt.ylabel(f'{class_names[int(preds[i].numpy())]}', color=col)
plt.tight_layout()
plt.show()
images, labels = next(iter(dl_train))
show_images(images, labels, labels)
images, labels = next(iter(dl_test))
show_images(images, labels, labels)
```
# Creating the Model
```
resnet18 = torchvision.models.resnet18(pretrained=True)
print(resnet18)
resnet18.fc = torch.nn.Linear(in_features=512, out_features=2)
loss_fn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(resnet18.parameters(), lr=3e-5)
print(resnet18)
def show_preds():
resnet18.eval()
images, labels = next(iter(dl_test))
outputs = resnet18(images)
_, preds = torch.max(outputs, 1)
show_images(images, labels, preds)
show_preds()
```
# Training the Model
```
def train(epochs):
best_model_wts = copy.deepcopy(resnet18.state_dict())
b_acc = 0.0
t_loss = []
t_acc = []
avg_t_loss=[]
avg_t_acc=[]
v_loss = []
v_acc=[]
avg_v_loss = []
avg_v_acc = []
ep = []
print('Starting training..')
for e in range(0, epochs):
ep.append(e+1)
print('='*20)
print(f'Starting epoch {e + 1}/{epochs}')
print('='*20)
train_loss = 0.
val_loss = 0.
train_accuracy = 0
total_train = 0
correct_train = 0
resnet18.train() # set model to training phase
for train_step, (images, labels) in enumerate(dl_train):
optimizer.zero_grad()
outputs = resnet18(images)
_, pred = torch.max(outputs, 1)
loss = loss_fn(outputs, labels)
loss.backward()
optimizer.step()
train_loss += loss.item()
train_loss /= (train_step + 1)
_, predicted = torch.max(outputs, 1)
total_train += labels.nelement()
correct_train += sum((predicted == labels).numpy())
train_accuracy = correct_train / total_train
t_loss.append(train_loss)
t_acc.append(train_accuracy)
if train_step % 20 == 0:
print('Evaluating at step', train_step)
print(f'Training Loss: {train_loss:.4f}, Training Accuracy: {train_accuracy:.4f}')
accuracy = 0.
resnet18.eval() # set model to eval phase
for val_step, (images, labels) in enumerate(dl_test):
outputs = resnet18(images)
loss = loss_fn(outputs, labels)
val_loss += loss.item()
_, preds = torch.max(outputs, 1)
accuracy += sum((preds == labels).numpy())
val_loss /= (val_step + 1)
accuracy = accuracy/len(test_dataset)
print(f'Validation Loss: {val_loss:.4f}, Validation Accuracy: {accuracy:.4f}')
v_loss.append(val_loss)
v_acc.append(accuracy)
show_preds()
resnet18.train()
if accuracy > b_acc:
b_acc = accuracy
avg_t_loss.append(sum(t_loss)/len(t_loss))
avg_v_loss.append(sum(v_loss)/len(v_loss))
avg_t_acc.append(sum(t_acc)/len(t_acc))
avg_v_acc.append(sum(v_acc)/len(v_acc))
best_model_wts = copy.deepcopy(resnet18.state_dict())
print('Best validation Accuracy: {:4f}'.format(b_acc))
print('Training complete..')
plt.plot(ep, avg_t_loss, 'g', label='Training loss')
plt.plot(ep, avg_v_loss, 'b', label='validation loss')
plt.title('Training and Validation loss for each epoch')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.savefig('/gdrive/My Drive/Research_Documents_completed/Resnet18_completed/resnet18_loss.png')
plt.show()
plt.plot(ep, avg_t_acc, 'g', label='Training accuracy')
plt.plot(ep, avg_v_acc, 'b', label='validation accuracy')
plt.title('Training and Validation Accuracy for each epoch')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.savefig('/gdrive/My Drive/Research_Documents_completed/Resnet18_completed/resnet18_accuarcy.png')
plt.show()
torch.save(resnet18.state_dict(),'/gdrive/My Drive/Research_Documents_completed/Resnet18_completed/resnet18.pt')
%%time
train(epochs=5)
```
# Final Results
Validation loss and training loss vs. epoch.
Validation accuracy and training accuracy vs. epoch.
Best validation accuracy.
```
show_preds()
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors. [Licensed under the Apache License, Version 2.0](#scrollTo=ByZjmtFgB_Y5).
```
// #@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
```
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/swift/tutorials/python_interoperability"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/swift/blob/main/docs/site/tutorials/python_interoperability.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/swift/blob/main/docs/site/tutorials/python_interoperability.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
# Python interoperability
Swift For TensorFlow supports Python interoperability.
You can import Python modules from Swift, call Python functions, and convert values between Swift and Python.
```
import PythonKit
print(Python.version)
```
## Setting the Python version
By default, when you `import Python`, Swift searches system library paths for the newest version of Python installed.
To use a specific Python installation, set the `PYTHON_LIBRARY` environment variable to the `libpython` shared library provided by the installation. For example:
`export PYTHON_LIBRARY="~/anaconda3/lib/libpython3.7m.so"`
The exact filename will differ across Python environments and platforms.
Alternatively, you can set the `PYTHON_VERSION` environment variable, which instructs Swift to search system library paths for a matching Python version. Note that `PYTHON_LIBRARY` takes precedence over `PYTHON_VERSION`.
In code, you can also call the `PythonLibrary.useVersion` function, which is equivalent to setting `PYTHON_VERSION`.
```
// PythonLibrary.useVersion(2)
// PythonLibrary.useVersion(3, 7)
```
__Note: you should run `PythonLibrary.useVersion` right after `import Python`, before calling any Python code. It cannot be used to dynamically switch Python versions.__
Set `PYTHON_LOADER_LOGGING=1` to see [debug output for Python library loading](https://github.com/apple/swift/pull/20674#discussion_r235207008).
## Basics
In Swift, `PythonObject` represents an object from Python.
All Python APIs use and return `PythonObject` instances.
Basic types in Swift (like numbers and arrays) are convertible to `PythonObject`. In some cases (for literals and functions taking `PythonConvertible` arguments), conversion happens implicitly. To explicitly cast a Swift value to `PythonObject`, use the `PythonObject` initializer.
`PythonObject` defines many standard operations, including numeric operations, indexing, and iteration.
```
// Convert standard Swift types to Python.
let pythonInt: PythonObject = 1
let pythonFloat: PythonObject = 3.0
let pythonString: PythonObject = "Hello Python!"
let pythonRange: PythonObject = PythonObject(5..<10)
let pythonArray: PythonObject = [1, 2, 3, 4]
let pythonDict: PythonObject = ["foo": [0], "bar": [1, 2, 3]]
// Perform standard operations on Python objects.
print(pythonInt + pythonFloat)
print(pythonString[0..<6])
print(pythonRange)
print(pythonArray[2])
print(pythonDict["bar"])
// Convert Python objects back to Swift.
let int = Int(pythonInt)!
let float = Float(pythonFloat)!
let string = String(pythonString)!
let range = Range<Int>(pythonRange)!
let array: [Int] = Array(pythonArray)!
let dict: [String: [Int]] = Dictionary(pythonDict)!
// Perform standard operations.
// Outputs are the same as Python!
print(Float(int) + float)
print(string.prefix(6))
print(range)
print(array[2])
print(dict["bar"]!)
```
`PythonObject` defines conformances to many standard Swift protocols:
* `Equatable`
* `Comparable`
* `Hashable`
* `SignedNumeric`
* `Strideable`
* `MutableCollection`
* All of the `ExpressibleBy_Literal` protocols
Note that these conformances are not type-safe: crashes will occur if you attempt to use protocol functionality from an incompatible `PythonObject` instance.
```
let one: PythonObject = 1
print(one == one)
print(one < one)
print(one + one)
let array: PythonObject = [1, 2, 3]
for (i, x) in array.enumerated() {
print(i, x)
}
```
To convert tuples from Python to Swift, you must statically know the arity of the tuple.
Call one of the following instance methods:
- `PythonObject.tuple2`
- `PythonObject.tuple3`
- `PythonObject.tuple4`
```
let pythonTuple = Python.tuple([1, 2, 3])
print(pythonTuple, Python.len(pythonTuple))
// Convert to Swift.
let tuple = pythonTuple.tuple3
print(tuple)
```
## Python builtins
Access Python builtins via the global `Python` interface.
```
// `Python.builtins` is a dictionary of all Python builtins.
_ = Python.builtins
// Try some Python builtins.
print(Python.type(1))
print(Python.len([1, 2, 3]))
print(Python.sum([1, 2, 3]))
```
## Importing Python modules
Use `Python.import` to import a Python module. It works like the `import` keyword in `Python`.
```
let np = Python.import("numpy")
print(np)
let zeros = np.ones([2, 3])
print(zeros)
```
Use the throwing function `Python.attemptImport` to perform safe importing.
```
let maybeModule = try? Python.attemptImport("nonexistent_module")
print(maybeModule)
```
## Conversion with `numpy.ndarray`
The following Swift types can be converted to and from `numpy.ndarray`:
- `Array<Element>`
- `ShapedArray<Scalar>`
- `Tensor<Scalar>`
Conversion succeeds only if the `dtype` of the `numpy.ndarray` is compatible with the `Element` or `Scalar` generic parameter type.
For `Array`, conversion from `numpy` succeeds only if the `numpy.ndarray` is 1-D.
```
import TensorFlow
let numpyArray = np.ones([4], dtype: np.float32)
print("Swift type:", type(of: numpyArray))
print("Python type:", Python.type(numpyArray))
print(numpyArray.shape)
// Examples of converting `numpy.ndarray` to Swift types.
let array: [Float] = Array(numpy: numpyArray)!
let shapedArray = ShapedArray<Float>(numpy: numpyArray)!
let tensor = Tensor<Float>(numpy: numpyArray)!
// Examples of converting Swift types to `numpy.ndarray`.
print(array.makeNumpyArray())
print(shapedArray.makeNumpyArray())
print(tensor.makeNumpyArray())
// Examples with different dtypes.
let doubleArray: [Double] = Array(numpy: np.ones([3], dtype: np.float))!
let intTensor = Tensor<Int32>(numpy: np.ones([2, 3], dtype: np.int32))!
```
## Displaying images
You can display images in-line using `matplotlib`, just like in Python notebooks.
```
// This cell is here to display plots inside a Jupyter Notebook.
// Do not copy it into another environment.
%include "EnableIPythonDisplay.swift"
print(IPythonDisplay.shell.enable_matplotlib("inline"))
let np = Python.import("numpy")
let plt = Python.import("matplotlib.pyplot")
let time = np.arange(0, 10, 0.01)
let amplitude = np.exp(-0.1 * time)
let position = amplitude * np.sin(3 * time)
plt.figure(figsize: [15, 10])
plt.plot(time, position)
plt.plot(time, amplitude)
plt.plot(time, -amplitude)
plt.xlabel("Time (s)")
plt.ylabel("Position (m)")
plt.title("Oscillations")
plt.show()
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import scipy as sp
from scipy import sparse
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import string
import re
import glob
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import CountVectorizer, FeatureHasher
import keras
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense, Embedding, LSTM, Dropout
from keras.models import Sequential
import matplotlib.pyplot as plt
print('Keras version: %s' % keras.__version__)
PATH = "data/aclImdb"
# or use nltk or spacy
htmltag = re.compile(r'<.*?>')
numbers = re.compile(r'[0-9]')
quotes = re.compile(r'\"|`')
punctuation = re.compile(r'([%s])'% string.punctuation)
english_stopwords =set(stopwords.words('english'))
stemmer = PorterStemmer()
# read files in the given tree, using subfolders as the target classes
def read_files(folder, subfolders):
corpus, labels = [], []
for index, label in enumerate(subfolders):
path = '/'.join([folder, label, '*.txt'])
for filename in glob.glob(path):
corpus.append(open(filename, 'r').read())
labels.append(index)
    return corpus, np.array(labels).astype(int)
# pre-processor
def preprocess(s):
# lowercase
s = s.lower()
# remove html tags
s = htmltag.sub(' ', s)
# remove numbers
s = numbers.sub(' ', s)
# remove quotes
s = quotes.sub(' ', s)
# replace puctuation
s = punctuation.sub(' ', s)
return s
# tokenization
def tokenize(s):
# use a serious tokenizer
tokens = nltk.word_tokenize(s)
# remove stopwords
tokens = filter(lambda w: not w in english_stopwords, tokens)
# stem words
tokens = [stemmer.stem(token) for token in tokens]
return tokens
#coprus_train_pos = [open(filename, 'r').read() for filename in glob.glob(PATH + '/train/pos/*.txt')]
#coprus_train_neg = [open(filename, 'r').read() for filename in glob.glob(PATH + '/train/neg/*.txt')]
corpus_train, y_train = read_files(PATH + '/train', ['neg', 'pos'])
corpus_test, y_test = read_files(PATH + '/test', ['neg', 'pos'])
len(corpus_train), len(y_train), corpus_train[0], y_train[0], corpus_train[24999], y_train[24999]
len(corpus_test), len(y_test), corpus_test[0], y_test[0]
vectorizer = CountVectorizer(preprocessor=preprocess, tokenizer=tokenize)
term_doc_train = vectorizer.fit_transform(corpus_train)
term_doc_test = vectorizer.transform(corpus_test)
vocab = vectorizer.get_feature_names()
vocab[100:102]
vocab_size = len(vocab)
h = FeatureHasher(n_features=10, input_type='string')
f = h.fit_transform(['q', 'w'])
f.shape, f.toarray()
term_doc_train[0]
term_doc_train[100].toarray()
vectorizer.vocabulary_['cool']
# Multinomial Naive Bayes
alpha = 0.1 # smoothing parameter
class MultinomialNaiveBayes():
"""
Arguments:
alpha: smoothing parameter
"""
def __init__(self, alpha=0.1):
self.b = 0
self.r = 0
self.alpha = alpha
def fit(self, X, y):
# bias
N_pos = (y==1).shape[0]
N_neg = (y==0).shape[0]
self.b = np.log(N_pos / N_neg)
        # count of occurrences for every token in vocabulary as they appear in positive samples
p = alpha + X[y==1].sum(axis=0)
p_l1 = np.linalg.norm(p, ord=1) # L1 norm
        # count of occurrences for every token in vocabulary as they appear in negative samples
q = alpha + X[y==0].sum(axis=0)
q_l1 = np.linalg.norm(q, ord=1) # L1 norm
# log count ratio
self.r = np.log((p/p_l1) / (q/q_l1))
#self.r = sp.sparse.csr_matrix(self.r.T)
return self.r, self.b
def predict(self, X):
y_pred = np.sign(sp.sparse.csr_matrix.dot(X, self.r.T) + self.b)
y_pred[y_pred==-1] = 0
return y_pred
def score(self, X, y):
y_predict = self.predict(X)
y_reshaped = np.reshape(y, y_predict.shape)
return (y_reshaped == y_predict).mean()
model = MultinomialNaiveBayes()
r, b = model.fit(term_doc_train, y_train)
b, r.shape, term_doc_train.shape
term_doc_train.shape, r.shape, term_doc_train[0], r
# accuracy on training set
y_pred = model.predict(term_doc_train)
#y_train = np.reshape(y_train, (25000, 1))
(np.reshape(y_train, (25000, 1)) == y_pred).mean()
# accuracy on validation set
y_pred2 = model.predict(term_doc_test)
#y_test = np.reshape(y_test, (25000, 1))
(np.reshape(y_test, (25000, 1)) == y_pred2).mean()
# now let's binarize the term-document matrix
term_doc_train = term_doc_train.sign() # turn everything into 1 or 0
term_doc_test = term_doc_test.sign() # turn everything into 1 or 0
term_doc_train.shape, term_doc_test.shape
model = MultinomialNaiveBayes()
model.fit(term_doc_train, y_train)
accuracy_train = model.score(term_doc_train, y_train)
accuracy_test = model.score(term_doc_test, y_test)
accuracy_train, accuracy_test
term_doc_train.shape, y_train.shape, term_doc_train[y_train==0].sum(axis=0).shape, term_doc_train[y_train==1].sum(axis=0).shape
(y_train==0).shape, (y_train==1).shape, y_pred.shape
# now with plain logistic regression
model = LogisticRegression()
model.fit(term_doc_train, y_train)
# accuracy on training
y_pred = model.predict(term_doc_train)
accuracy_train = (y_train == y_pred).mean()
# accuracy on validation
y_pred = model.predict(term_doc_test)
accuracy_test = (y_test == y_pred).mean()
accuracy_train, accuracy_test
# now with regularized logistic regression
model = LogisticRegression(C=0.01, dual=True)
model.fit(term_doc_train, y_train)
# accuracy on training
y_pred = model.predict(term_doc_train)
accuracy_train = (y_train == y_pred).mean()
# accuracy on validation
y_pred = model.predict(term_doc_test)
accuracy_test = (y_test == y_pred).mean()
accuracy_train, accuracy_test
# now combining Naive Bayes and Logistic Regression (a sketch follows after this cell)
"""
class NBLR(keras.Model):
def __init__(self):
super(NBLR, self).__init__(name='NBLR')
self.softmax = keras.layers.Activation('softmax')
def call(self, inputs):
out = self.softmax(inputs)
return out
model = NBLR()
model.compile(loss='mean_squared_error', optimizer='sgd', metrics=['accuracy'])
losses = model.fit(x=term_doc_train, y=y_train)
"""
```
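One common way to combine the two models, sketched below, is the "Naive Bayes features + Logistic Regression" idea: scale the binarized term-document matrix by the Naive Bayes log-count ratio `r` and fit a logistic regression on the scaled features. This is only an illustrative sketch of that approach, not necessarily what the commented-out `NBLR` class was meant to do; it assumes the variables defined in the cells above.
```
# Sketch: Naive Bayes log-count ratio used as feature scaling for Logistic Regression.
# Assumes MultinomialNaiveBayes, term_doc_train/test and y_train/y_test from the cells above.
nb = MultinomialNaiveBayes()
r_bin, b_bin = nb.fit(term_doc_train, y_train)       # log-count ratio on the binarized counts

nb_train = term_doc_train.multiply(r_bin).tocsr()    # scale each column by its log-count ratio
nb_test = term_doc_test.multiply(r_bin).tocsr()

nblr = LogisticRegression(C=0.1)
nblr.fit(nb_train, y_train)
print(nblr.score(nb_train, y_train), nblr.score(nb_test, y_test))
```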
| github_jupyter |
## LDA
The graphical model representation of LDA is given blow:
<img src="figures/LDA.png">
The basic idea of LDA is that documents are represented as random mixtures over latent topics, where each topic is characterized by a distribution over words.
LDA assumes the following generative process for each document $\mathbf{w}$ in a corpus $\mathcal{D}$ (a short code sketch of these steps is given after the list):
1. Choose $N\sim$ Poisson($\xi$).
2. Choose $\theta \sim $Dir($\alpha$).
3. For each of the $N$ words $w_n$:
a) Choose a topic $z_n\sim$ Multinomial($\theta$)
b) Choose a word $w_n$ from $p(w_n|z_n, \beta)$, a multinomial probability conditioned on the topic $z_n$.
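A minimal code sketch of this generative process for a toy corpus is shown below; the vocabulary size, $k$, $\alpha$, $\beta$ and $\xi$ used here are illustrative assumptions, not values from the text.
```
import numpy as np

rng = np.random.default_rng(0)
V, k, xi = 20, 3, 8                        # toy vocabulary size, number of topics, Poisson mean
alpha = 0.5 * np.ones(k)                   # Dirichlet parameter
beta = rng.dirichlet(np.ones(V), size=k)   # k x V matrix of topic-word probabilities

def generate_document():
    N = rng.poisson(xi)                    # 1. choose the document length
    theta = rng.dirichlet(alpha)           # 2. choose the topic mixture
    words = []
    for _ in range(N):
        z = rng.choice(k, p=theta)              # 3a. choose a topic
        words.append(rng.choice(V, p=beta[z]))  # 3b. choose a word from that topic
    return words

print(generate_document())
```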
Several simplifying assumptions are made in this basic model:
1. The dimensionality $k$ of the Dirichlet distribution (and thus the dimensionality of the topic variable $z$) is assumed known and fixed.
2. The word probabilities are parameterized by a $k\times V$ matrix $\beta$ where $\beta_{ij} = p(w^j=1 | z^i=1)$, which for now we treat as a fixed quantity that is to be estimated.
3. The Poisson assumption is not critical to anything that follows and more realistic document length distributions can be used as needed.
A $k$-dimensional Dirichlet random variable $\theta$ can take values in the $(k-1)$-simplex (a $k$-vector $\theta$ in the $(k-1)$-simplex if $\theta_i\ge 0, \sum_{i=1}^k\theta_i=1$), and has the following probability density on this simplex:
$$
p(\theta|\alpha) = \frac{\Gamma(\sum_{i=1}^k\alpha_i)}{\prod_{i=1}^k\Gamma(\alpha_i)}\theta_1^{\alpha_1-1}\cdots\theta_k^{a_k-1},
$$
where the parameter $\alpha$ is a $k$-vector with components $\alpha_i > 0$, and where $\Gamma(x)$ is the Gamma function.
Given the parameters $\alpha$ and $\beta$, the joint distribution of a topic mixture $\theta$, a set of $N$ topics $\mathbf{z}$, and a set of $N$ words $\mathbf{w}$ is given by:
$$
p(\theta, \mathbf{z}, \mathbf{w}|\alpha, \beta) = p(\theta | \alpha) \prod_{n=1}^N p(z_n|\theta)p(w_n|z_n,\beta),
$$
where $p(z_n|\theta)$ is simply $\theta_i$ for the unique $i$ such that $z_n^i = 1$. Integrating over $\theta$ and summing over $z$, we obtain the marginal distribution of a document:
$$
p(\mathbf{w}|\alpha,\beta) = \int p(\theta|\alpha)\left(\prod_{n=1}^N\sum_{z_n}p(z_n|\theta) p(w_n|z_n,\beta)\right)d\theta.
$$
Finally, taking the product of the marginal probabilities of single documents, we obtain the probability of a corpus:
$$
p(\mathcal{D}|\alpha,\beta) = \prod_{d=1}^M\int p(\theta_d|\alpha)\left(\prod_{n=1}^{N_d}\sum_{z_{d_n}}p(z_{d_n}|\theta_d)p(w_{d_n}| z_{d_n},\beta)\right)d\theta_d.
$$
There are **three** levels to the LDA representation. The parameters $\alpha$ and $\beta$ are corpus-level parameters, assumed to be sampled once in the process of generating a corpus. The variables $\theta_d$ are document-level variables, sampled once per document. Finally, the variables $z_{dn}$ and $w_{dn}$ are word-level variables and are sampled once for each word in each document.
Note the topic node is sampled *repeatedly* within the document. Under LDA, documents can be associated with multiple topics.
### Inference
The key inference problem that we need to solve in order to use LDA is that of computing the posterior distribution of the hidden variables given a document:
$$
p(\theta, \mathbf{z}|\mathbf{w}, \alpha, \beta) = \frac{p(\theta, \mathbf{z}, \mathbf{w}| \alpha, \beta)}{p(\mathbf{w}|\alpha, \beta)}.
$$
Unfortunately, this distribution is intractable to compute in general. We can, however, use a variety of approximate inference algorithms.
### Variational Inference
A convexity-based variational algorithm can be used for inference in LDA.
Basic idea:
- Use Jensen's inequality to obtain an adjustable lower bound on the log likelihood.
## Probabilistic latent semantic indexing
This model posits that a document label $d$ and a word $w_n$ are conditionally independent given an unobserved topic $z$:
$$
p(d, w_n) = p(d)\sum_{z}p(w_n|z)p(z|d).
$$
The pLSI model attempts to relax the simplifying assumption made in the mixture of unigrams model that each document is generated from only one topic. In a sense, it does capture the possibility that a document may contain multiple topics since $p(z|d)$ serves as the mixture weights of the topics for a particular document $d$.
However, we need to note several problems:
1. $d$ is a dummy index into the list of documents in the *training set*. Thus, $d$ is a multinomial random variable with as many possible values as there are training documents and the model learns the topic mixtures $p(z|d)$ only for those documents on which it is trained.
2. A second problem, which also stems from the use of a distribution indexed by training documents, is that the number of parameters that must be estimated grows linearly with the number of training documents. The parameters for a $k$-topic pLSI model are $k$ multinomial distributions of size $V$ and $M$ mixtures over the $k$ hidden topics. This gives $kV + kM$ parameters and therefore linear growth in $M$ (a small numeric illustration follows below).
$\therefore$ pLSI is not a well-defined generative model of documents; there is no natural way to use it to assign probability to a previously unseen document. Also, this linear growth in parameters suggests that the model is prone to overfitting.
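As a rough illustration of this parameter growth (the numbers below are arbitrary): with $k = 100$ topics and a vocabulary of $V = 10{,}000$ words, the $kV$ topic-word parameters stay fixed at $10^6$, while the $kM$ document-topic parameters grow with the corpus size $M$.
```
# Illustrative pLSI parameter count, kV + kM, for arbitrary k, V and a growing corpus size M
k, V = 100, 10_000
for M in (1_000, 100_000, 10_000_000):
    print(M, "documents ->", k * V + k * M, "parameters")
```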
The principal advantages of generative models such as LDA include their modularity and their extensibility. As a probabilistic module, LDA can be readily embedded in a more complex model.
LDA overcomes both problems of pLSI by treating the topic mixture weights as a $k$-parameter hidden *random variables* rather than a large set of individual parameters which are explicitly linked to the training set.
| github_jupyter |
<a href="https://colab.research.google.com/github/100rab-S/TensorFlow-Advanced-Techniques/blob/main/C1W3_L3_CustomLayerWithActivation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Ungraded Lab: Activation in Custom Layers
In this lab, we extend our knowledge of building custom layers by adding an activation parameter. The implementation is pretty straightforward as you'll see below.
## Imports
```
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
from tensorflow.keras.layers import Layer
```
## Adding an activation layer
To use the built-in activations in Keras, we can specify an `activation` parameter in the `__init__()` method of our custom layer class. From there, we can initialize it by using the `tf.keras.activations.get()` method. This takes in a string identifier that corresponds to one of the [available activations](https://keras.io/api/layers/activations/#available-activations) in Keras. Next, you can now pass in the forward computation to this activation in the `call()` method.
```
class SimpleDense(Layer):
# add an activation parameter
def __init__(self, units=32, activation=None):
super(SimpleDense, self).__init__()
self.units = units
# define the activation to get from the built-in activation layers in Keras
self.activation = tf.keras.activations.get(activation)
def build(self, input_shape): # we don't need to change anything in this method to add activation to our custom layer
w_init = tf.random_normal_initializer()
self.w = tf.Variable(name="kernel",
initial_value=w_init(shape=(input_shape[-1], self.units),
dtype='float32'),
trainable=True)
b_init = tf.zeros_initializer()
self.b = tf.Variable(name="bias",
initial_value=b_init(shape=(self.units,), dtype='float32'),
trainable=True)
#super().build(input_shape)
def call(self, inputs):
# pass the computation to the activation layer
return self.activation(tf.matmul(inputs, self.w) + self.b)
```
We can now pass in an activation parameter to our custom layer. The string identifier is mostly the same as the function name so 'relu' below will get `tf.keras.activations.relu`.
```
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
SimpleDense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test)
```
| github_jupyter |
# Statistical Tools
# Contents:
1. Statistics:
- Mean value.
- Median.
- Standard deviation.
2. Histograms:
- Histograms with python.
- Histograms with numpy.
- How to normalize a histogram.
3. Distributions:
- How to obtain a distribution from a histogram.
- Normal distribution
- Poisson distribution
- Binomial distribution
# 1. Statistics
## Mean
The mean of a variable $x$ is defined as:
$\bar{x} = \dfrac{\sum{x_i}}{N}$
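For example, a quick check that this formula agrees with `np.mean` (a small illustrative snippet):
```
import numpy as np

x = np.array([1, 4, 7, 7, 3, 3, 1])
manual_mean = x.sum() / len(x)        # sum(x_i) / N
print(manual_mean, np.mean(x))        # both give ~3.714
```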
## Median
The median of a dataset is the value that splits the dataset in two:
Example:
let $x$ = [1, 4, 7, 7, 3, 3, 1]; then $median(x) = 3$
Formally, the median is defined as the value $x_m$ that splits the cumulative distribution function $F(x)$ into two equal parts:
$F(x_m) = \dfrac{1}{2}$
## The most probable value
It is the value with the highest probability, $x_p$.
Example:
let $x$ = [1, 4, 7, 7, 3, 2, 1]; the most probable value is $x_p = 7$
```
import matplotlib.pyplot as plt
import numpy as np
# %pylab inline
def mi_mediana(lista):
x = sorted(lista)
d = int(len(x)/2)
if(len(x)%2==0):
return (x[d-1] + x[d])*0.5
else:
        return x[d]
x_input = [1,3,4,5,5,7,7,6,8,6]
mi_mediana(x_input)
print(mi_mediana(x_input) == np.median(x_input))
```
## Problems of not knowing statistics
These kinds of concepts seem simple, but they are not always clear to everyone.
```
x = np.arange(1, 12)
y = np.random.random(11)*10
plt.figure(figsize=(12, 5))
fig = plt.subplot(1, 2, 1)
plt.scatter(x, y, c='purple', alpha=0.8, s=60)
y_mean = np.mean(y)
y_median = np.median(y)
plt.axhline(y_mean, c='g', lw=3, label=r"$\rm{Mean}$")
plt.axhline(y_median, c='r', lw=3, label=r"$\rm{Median}$")
plt.legend(fontsize=20)
fig = plt.subplot(1, 2, 2)
h = plt.hist(x, alpha=0.6, histtype='bar', ec='black')
print(y)
```
# Standard deviation
It is the average of the uncertainties of the measurements $x_i$:
$\sigma = \sqrt{\dfrac{1}{n-1} \sum(x_{i} - \bar{x})^2}$
where $n$ is the sample size.
Additionally, the ${\bf{variance}}$ is defined as:
$\sigma^2 = \overline{x^2} - \bar{x}^{2} = \dfrac{1}{N} \sum(x_{i} - \bar{x})^2$
It is a measure, similar to the standard deviation, of the dispersion of the data around the mean.
Here $N$ is the total population size.
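A small check of these formulas against NumPy; note that `np.std` and `np.var` use the population form ($1/N$) by default, so `ddof=1` is needed to reproduce the sample formula with $n-1$:
```
import numpy as np

x = np.array([2., 4., 4., 4., 5., 5., 7., 9.])
manual_sample_std = np.sqrt(((x - x.mean())**2).sum() / (len(x) - 1))
print(manual_sample_std, np.std(x, ddof=1))   # sample standard deviation, 1/(n-1)
print(np.var(x), np.std(x))                   # population variance and standard deviation, 1/N
```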
# Correlation Function
$cor(x, y) = \dfrac{\langle (x-\bar{x})(y-\bar{y}) \rangle}{\sigma_x \sigma_{y}}$
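For instance, this correlation can be computed directly with NumPy; the snippet below is a quick sketch, and `np.corrcoef` applies the same definition:
```
import numpy as np

x = np.random.random(100)
y = 2 * x + np.random.normal(0, 0.1, 100)   # y strongly correlated with x

manual_cor = np.mean((x - x.mean()) * (y - y.mean())) / (np.std(x) * np.std(y))
print(manual_cor, np.corrcoef(x, y)[0, 1])  # both values should be close to 1
```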
# Exercise:
Check whether the following properties hold:
1. Cor(X,Y) = Cor(Y, X)
2. Cor(X,X) = 1
3. Cor(X,-X) = -1
4. Cor(aX+b, cY + d) = Cor(X, Y), if a and c != 0
```
x = np.arange(1, 12)
y = np.random.random(11)*10
plt.figure(figsize=(9, 5))
y_mean = np.mean(y)
y_median = np.median(y)
plt.axhline(y_mean, c='g', lw=3, label=r"$\rm{Mean}$")
plt.axhline(y_median, c='r', lw=3, label=r"$\rm{Median}$")
sigma_y = np.std(y)
plt.axhspan(y_mean-sigma_y, y_mean + sigma_y, facecolor='g', alpha=0.5, label=r"$\rm{\sigma}$")
plt.legend(fontsize=20)
plt.scatter(x, y, c='purple', alpha=0.8, s=60)
plt.ylim(-2, 14)
print("Variance =", np.var(y))
print("Standard deviation =", np.std(y))
```
## References:
For more statistical functions that can be used in python, see:
- NumPy: http://docs.scipy.org/doc/numpy/reference/routines.statistics.html
- SciPy: http://docs.scipy.org/doc/scipy/reference/stats.html
# Histograms
## 1. hist
hist is a python (matplotlib) function that generates a histogram from an array of data.
```
x = np.random.random(200)
plt.subplot(2,2,1)
plt.title("A simple hist")
h = plt.hist(x)
plt.subplot(2,2,2)
plt.title("bins")
h = plt.hist(x, bins=20)
plt.subplot(2,2,3)
plt.title("alpha")
h = plt.hist(x, bins=20, alpha=0.6)
plt.subplot(2,2,4)
plt.title("histtype")
h = plt.hist(x, bins=20, alpha=0.6, histtype='stepfilled')
```
## 2. Numpy-histogram
```
N, bins = np.histogram(x, bins=15)
plt.plot(bins[0:-1], N)
```
# 2D Histograms
```
x = np.random.random(500)
y = np.random.random(500)
plt.subplot(4, 2, 1)
plt.hexbin(x, y, gridsize=15, cmap="gray")
plt.colorbar()
plt.subplot(4, 2, 2)
data = plt.hist2d(x, y, bins=15, cmap="binary")
plt.colorbar()
plt.subplot(4, 2, 3)
plt.hexbin(x, y, gridsize=15)
plt.colorbar()
plt.subplot(4, 2, 4)
data = plt.hist2d(x, y, bins=15)
plt.colorbar()
```
# How to normalize a histogram.
Normalizing a histogram means that the integral of the histogram is equal to 1.
```
x = np.random.random(10)*4
plt.title("How not to normalize a histogram", fontsize=25)
h = plt.hist(x, density=True)
print("The bin width must be equal to one")
plt.title("How to normalize a histogram", fontsize=25)
h = plt.hist(x, density=True, bins=4)
```
What is the probability of getting heads 9 times in 10 tosses?
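As a quick check before the general discussion, the probability of exactly 9 heads in 10 fair tosses is $\binom{10}{9}\,0.5^{10} \approx 0.0098$; the sketch below computes it with scipy's binomial distribution.
```
from scipy.stats import binom

# probability of exactly 9 heads in 10 tosses of a fair coin
print(binom.pmf(9, n=10, p=0.5))   # ~0.00977, i.e. about 1%
```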
# Probability Distribution:
Probability distributions tell us how likely it is that a random variable $x$ falls within a given interval. If we have a dataset, how can we find its probability distribution?
```
x = np.random.random(100)*10
plt.subplot(1, 2, 1)
h = plt.hist(x)
plt.subplot(1, 2, 2)
histo, bin_edges = np.histogram(x, density=True)
plt.bar(bin_edges[:-1], histo, width=1)
plt.xlim(min(bin_edges), max(bin_edges))
```
# Normal Distribution: Mathematical Description.
$f(x, \mu, \sigma) = \dfrac{1}{\sigma \sqrt{2\pi}} e^{-\dfrac{(x-\mu)^2}{2\sigma^2}}$
where $\sigma$ is the standard deviation and $\mu$ is the mean of the data $x$.
It is a probability distribution function that is completely determined by the parameters $\mu$ and $\sigma$.
The function is symmetric around $\mu$.
In python we can use scipy to work with the normal distribution.
```
import scipy.stats
x = np.linspace(0, 1, 100)
n_dist = scipy.stats.norm(0.5, 0.1)
plt.plot(x, n_dist.pdf(x))
```
## We can generate random numbers with a normal distribution:
```
x = np.random.normal(0.0, 1.0, 1000)
y = np.random.normal(0.0, 2.0, 1000)
w = np.random.normal(0.0, 3.0, 1000)
z = np.random.normal(0.0, 4.0, 1000)
histo = plt.hist(z, alpha=0.2, histtype="stepfilled", color='r')
histo = plt.hist(w, alpha=0.4, histtype="stepfilled", color='b')
histo = plt.hist(y, alpha=0.6, histtype="stepfilled", color='k')
histo = plt.hist(x, alpha=0.8, histtype="stepfilled", color='g')
plt.title(r"$\rm{Normal\ distributions\ with\ different\ \sigma}$", fontsize=20)
```
**Confidence interval**
$\sigma_1$: 68% of the data will lie within 1$\sigma$ of the mean.
$\sigma_2$: 95% of the data will lie within 2$\sigma$ of the mean.
$\sigma_3$: 99.7% of the data will lie within 3$\sigma$ of the mean.
(A quick numerical check of these percentages follows below.)
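A small sketch of that check, using a standard normal sample:
```
import numpy as np

z = np.random.normal(0.0, 1.0, 100000)
for kk in (1, 2, 3):
    frac = np.mean(np.abs(z) <= kk)                  # fraction of samples within kk sigma
    print("within", kk, "sigma:", round(frac, 3))    # ~0.683, ~0.954, ~0.997
```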
### Exercise: Generate normal distributions with:
- $\mu = 5$ and $\sigma = 2$
- $\mu = -3$ and $\sigma = -2$
- $\mu = 4$ and $\sigma = 5$
#### Plot the PDFs and the CDFs on the same axes, with different colors and legends. What do you observe? (One figure for the PDFs and another for the CDFs.)
# Exercise:
1. Make plots of:
   1. The difference heads - tails for 40 and 20 runs, each with a larger number of tosses than the previous one (abs(heads - tails) vs. number of tosses).
   2. The ratio (heads/tails) as a function of the number of tosses.
   Comment on the results.
2. Repeat the previous plots, but now on a logarithmic scale.
   Comment on the results.
3. Make one plot of the average of abs(heads - tails) as a function of the number of tosses on a logarithmic scale,
   and another with the average of (heads/tails).
   Comment on the results.
4. Repeat the previous item, but this time with the standard deviation.
   Comment on the results.
Let us imagine for a moment the following experiment:
We want to study the probability of getting heads or tails when flipping a coin; we know beforehand that it is 50%.
But let us look a bit deeper: what is the probability of getting 10 heads in a row?
To answer this, we propose the following method:
1. Flip a coin 10 times, check whether each flip comes up heads or tails, and record the results.
2. Repeat this procedure 1000 times.
## Function that flips the coin N times.
```
def coinflip(N):
cara = 0
sello = 0
i=0
while i < N:
x = np.random.randint(0, 10)/5.0
if x >= 1.0:
cara+=1
elif x<1.0:
sello+=1
i+=1
return cara/N, sello/N
```
## Function that performs M runs of N flips each.
```
def realizaciones(M, N):
caras=[]
for i in range(M):
x, y = coinflip(N)
caras.append(x)
return caras
caras = realizaciones(100000, 30.)
h = plt.hist(caras, density=True, bins=20)
```
# PDF
```
N, bins = np.histogram(x, density=True)
plt.plot(bins[0:-1], N)
```
# CDF
```
h = plt.hist(x, cumulative=True, bins=20)
```
# References:
- Coin example: Introduction to Computation and Programming Using Python, John Guttag, page 179.
- Examples of statistics in python: http://nbviewer.ipython.org/github/dhuppenkothen/ClassicalStatsPython/blob/master/classicalstatsexamples.ipynb
- For a mathematical derivation: A Modern Course in Statistical Physics, Reichl, page 191.
| github_jupyter |
```
import tensorflow as tf
from tensorflow.python.keras.utils import HDF5Matrix
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import (Input, Lambda, Conv2D, MaxPooling2D, Flatten, Dense, Dropout,
Lambda, Activation, BatchNormalization, concatenate, UpSampling2D,
ZeroPadding2D)
from sklearn.metrics import mean_squared_error, mean_absolute_error, confusion_matrix
from matplotlib import pyplot as plt
%matplotlib inline
import numpy as np
```
```Python
x_train = HDF5Matrix("data.h5", "x_train")
x_valid = HDF5Matrix("data.h5", "x_valid")
```
shapes should be:
* (1355578, 432, 560, 1)
* (420552, 432, 560, 1)
```
def gen_data(shape=0, name="input"):
data = np.random.rand(512, 512, 4)
label = data[:,:,-1]
return tf.constant(data.reshape(1,512,512,4).astype(np.float32)), tf.constant(label.reshape(1,512,512,1).astype(np.float32))
## NOTE:
## Tensor 4D -> Batch,X,Y,Z
## Tensor max. float32!
d, l = gen_data(0,0)
print(d.shape, l.shape)
def unet():
inputs, label = gen_data()
input_shape = inputs.shape
#down0a = Conv2D(16, (3, 3), padding='same')(inputs)
down0a = Conv2D(16, kernel_size=(3, 3), padding='same', input_shape=input_shape)(inputs)
down0a_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0a)
    print("down0a.shape:", down0a.shape, "\ndown0a_pool.shape:", down0a_pool.shape)  # ?!? last dim != batch?
    # dim0 = batch
    # dim1, dim2 = X, Y
    # dim3 = channels
up1 = UpSampling2D((3, 3))(down0a)
    print("upsamp.shape:", up1.shape)  # UpSampling changes dim1 and dim2, i.e. (?, X, Y, ?); the batch presumably stays on dim0
unet()
def unet2(input_shape, output_length):
inputs = Input(shape=input_shape, name="input")
# 512
down0a = Conv2D(16, (3, 3), padding='same')(inputs)
down0a = BatchNormalization()(down0a)
down0a = Activation('relu')(down0a)
down0a = Conv2D(16, (3, 3), padding='same')(down0a)
down0a = BatchNormalization()(down0a)
down0a = Activation('relu')(down0a)
down0a_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0a)
# 256
down0 = Conv2D(32, (3, 3), padding='same')(down0a_pool)
down0 = BatchNormalization()(down0)
down0 = Activation('relu')(down0)
down0 = Conv2D(32, (3, 3), padding='same')(down0)
down0 = BatchNormalization()(down0)
down0 = Activation('relu')(down0)
down0_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0)
# 128
down1 = Conv2D(64, (3, 3), padding='same')(down0_pool)
down1 = BatchNormalization()(down1)
down1 = Activation('relu')(down1)
down1 = Conv2D(64, (3, 3), padding='same')(down1)
down1 = BatchNormalization()(down1)
down1 = Activation('relu')(down1)
down1_pool = MaxPooling2D((2, 2), strides=(2, 2))(down1)
# 64
down2 = Conv2D(128, (3, 3), padding='same')(down1_pool)
down2 = BatchNormalization()(down2)
down2 = Activation('relu')(down2)
down2 = Conv2D(128, (3, 3), padding='same')(down2)
down2 = BatchNormalization()(down2)
down2 = Activation('relu')(down2)
down2_pool = MaxPooling2D((2, 2), strides=(2, 2))(down2)
# 8
center = Conv2D(1024, (3, 3), padding='same')(down2_pool)
center = BatchNormalization()(center)
center = Activation('relu')(center)
center = Conv2D(1024, (3, 3), padding='same')(center)
center = BatchNormalization()(center)
center = Activation('relu')(center)
# center
up2 = UpSampling2D((2, 2))(center)
up2 = concatenate([down2, up2], axis=3)
up2 = Conv2D(128, (3, 3), padding='same')(up2)
up2 = BatchNormalization()(up2)
up2 = Activation('relu')(up2)
up2 = Conv2D(128, (3, 3), padding='same')(up2)
up2 = BatchNormalization()(up2)
up2 = Activation('relu')(up2)
up2 = Conv2D(128, (3, 3), padding='same')(up2)
up2 = BatchNormalization()(up2)
up2 = Activation('relu')(up2)
# 64
up1 = UpSampling2D((2, 2))(up2)
up1 = concatenate([down1, up1], axis=3)
up1 = Conv2D(64, (3, 3), padding='same')(up1)
up1 = BatchNormalization()(up1)
up1 = Activation('relu')(up1)
up1 = Conv2D(64, (3, 3), padding='same')(up1)
up1 = BatchNormalization()(up1)
up1 = Activation('relu')(up1)
up1 = Conv2D(64, (3, 3), padding='same')(up1)
up1 = BatchNormalization()(up1)
up1 = Activation('relu')(up1)
# 128
up0 = UpSampling2D((2, 2))(up1)
up0 = concatenate([down0, up0], axis=3)
up0 = Conv2D(32, (3, 3), padding='same')(up0)
up0 = BatchNormalization()(up0)
up0 = Activation('relu')(up0)
up0 = Conv2D(32, (3, 3), padding='same')(up0)
up0 = BatchNormalization()(up0)
up0 = Activation('relu')(up0)
up0 = Conv2D(32, (3, 3), padding='same')(up0)
up0 = BatchNormalization()(up0)
up0 = Activation('relu')(up0)
# 256
up0a = UpSampling2D((2, 2))(up0)
up0a = concatenate([down0a, up0a], axis=3)
up0a = Conv2D(16, (3, 3), padding='same')(up0a)
up0a = BatchNormalization()(up0a)
up0a = Activation('relu')(up0a)
up0a = Conv2D(16, (3, 3), padding='same')(up0a)
up0a = BatchNormalization()(up0a)
up0a = Activation('relu')(up0a)
up0a = Conv2D(16, (3, 3), padding='same')(up0a)
up0a = BatchNormalization()(up0a)
up0a = Activation('relu')(up0a)
# 512
output = Conv2D(1, (1, 1), activation='relu')(up0a)
model = Model(inputs=inputs, outputs=output)
model.compile(loss="mean_squared_error", optimizer='adam')
return model
d = unet2((512,512,4),(512,512,1))
```
Afterwards:
```Python
output_length = 1
input_length = output_length + 1
input_shape=(432, 560, input_length)
model_1 = unet2(input_shape, output_length)
model_1.fit(x_train_1, y_train_1, batch_size = 16, epochs = 25,
validation_data=(x_valid_1, y_valid_1))
```
```
d.summary()
#ToDo: now learn something!
```
| github_jupyter |
# Implementing a Neural Network
In this exercise we will develop a neural network with fully-connected layers to perform classification, and test it out on the CIFAR-10 dataset.
```
# A bit of setup
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.neural_net import TwoLayerNet
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
```
We will use the class `TwoLayerNet` in the file `cs231n/classifiers/neural_net.py` to represent instances of our network. The network parameters are stored in the instance variable `self.params` where keys are string parameter names and values are numpy arrays. Below, we initialize toy data and a toy model that we will use to develop your implementation.
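As a quick sanity check on that layout, a loop like the following (a sketch; it just reuses the same constructor arguments as the toy model below) prints the shape of every array in `self.params`:
```
# Sketch: list the parameter arrays of a freshly initialized network.
# The shapes assume the usual (input, output) weight layout of this assignment.
net = TwoLayerNet(4, 10, 3, std=1e-1)
for name, param in sorted(net.params.items()):
    print(name, param.shape)   # e.g. W1 (4, 10), b1 (10,), W2 (10, 3), b2 (3,)
```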
```
# Create a small net and some toy data to check your implementations.
# Note that we set the random seed for repeatable experiments.
input_size = 4
hidden_size = 10
num_classes = 3
num_inputs = 5
def init_toy_model():
np.random.seed(0)
return TwoLayerNet(input_size, hidden_size, num_classes, std=1e-1)
def init_toy_data():
np.random.seed(1)
X = 10 * np.random.randn(num_inputs, input_size)
y = np.array([0, 1, 2, 2, 1])
return X, y
net = init_toy_model()
X, y = init_toy_data()
```
# Forward pass: compute scores
Open the file `cs231n/classifiers/neural_net.py` and look at the method `TwoLayerNet.loss`. This function is very similar to the loss functions you have written for the SVM and Softmax exercises: It takes the data and weights and computes the class scores, the loss, and the gradients on the parameters.
Implement the first part of the forward pass which uses the weights and biases to compute the scores for all inputs.
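In outline, the scores are produced by an affine layer, a ReLU, and a second affine layer. A minimal sketch of that computation (not the graded solution; it assumes the `(input, output)` weight layout noted above):
```
# Sketch of the score computation (affine -> ReLU -> affine)
W1, b1 = net.params['W1'], net.params['b1']
W2, b2 = net.params['W2'], net.params['b2']

hidden = np.maximum(0, X.dot(W1) + b1)   # (N, H) hidden activations
scores = hidden.dot(W2) + b2             # (N, C) class scores
```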
```
scores = net.loss(X)
print('Your scores:')
print(scores)
print()
print('correct scores:')
correct_scores = np.asarray([
[-0.81233741, -1.27654624, -0.70335995],
[-0.17129677, -1.18803311, -0.47310444],
[-0.51590475, -1.01354314, -0.8504215 ],
[-0.15419291, -0.48629638, -0.52901952],
[-0.00618733, -0.12435261, -0.15226949]])
print(correct_scores)
print()
# The difference should be very small. We get < 1e-7
print('Difference between your scores and correct scores:')
print(np.sum(np.abs(scores - correct_scores)))
```
# Forward pass: compute loss
In the same function, implement the second part that computes the data and regularization loss.
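Concretely, the data loss is the softmax cross-entropy averaged over the batch, and the regularization loss is an L2 penalty on the weight matrices. A sketch, continuing from the score computation above (the exact regularization convention, e.g. the 0.5 factor, may differ from the assignment's reference value):
```
# Sketch of softmax data loss plus L2 regularization (conventions may vary)
shifted = scores - np.max(scores, axis=1, keepdims=True)     # numerical stability
probs = np.exp(shifted) / np.sum(np.exp(shifted), axis=1, keepdims=True)
N = X.shape[0]
data_loss = -np.mean(np.log(probs[np.arange(N), y]))
reg_loss = 0.5 * reg * (np.sum(W1 * W1) + np.sum(W2 * W2))   # reg = regularization strength
loss = data_loss + reg_loss
```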
```
loss, _ = net.loss(X, y, reg=0.1)
correct_loss = 1.30378789133
# should be very small, we get < 1e-12
print('Difference between your loss and correct loss:')
print(np.sum(np.abs(loss - correct_loss)))
```
# Backward pass
Implement the rest of the function. This will compute the gradient of the loss with respect to the variables `W1`, `b1`, `W2`, and `b2`. Now that you (hopefully!) have a correctly implemented forward pass, you can debug your backward pass using a numeric gradient check:
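Conceptually the backward pass is just the chain rule applied layer by layer; continuing the sketch above (again not the graded solution, and the regularization gradient must match whatever convention the loss used):
```
# Sketch of backpropagation through softmax -> affine -> ReLU -> affine
dscores = probs.copy()
dscores[np.arange(N), y] -= 1
dscores /= N                               # gradient of the averaged softmax loss

grads = {}
grads['W2'] = hidden.T.dot(dscores) + reg * W2
grads['b2'] = np.sum(dscores, axis=0)

dhidden = dscores.dot(W2.T)
dhidden[hidden <= 0] = 0                   # ReLU gate

grads['W1'] = X.T.dot(dhidden) + reg * W1
grads['b1'] = np.sum(dhidden, axis=0)
```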
```
from cs231n.gradient_check import eval_numerical_gradient
# Use numeric gradient checking to check your implementation of the backward pass.
# If your implementation is correct, the difference between the numeric and
# analytic gradients should be less than 1e-8 for each of W1, W2, b1, and b2.
loss, grads = net.loss(X, y, reg=0.1)
# these should all be less than 1e-8 or so
for param_name in grads:
    f = lambda W: net.loss(X, y, reg=0.1)[0]
    param_grad_num = eval_numerical_gradient(f, net.params[param_name], verbose=False)
    print(param_grad_num.shape)
    print('%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name])))
```
# Train the network
To train the network we will use stochastic gradient descent (SGD), similar to the SVM and Softmax classifiers. Look at the function `TwoLayerNet.train` and fill in the missing sections to implement the training procedure. This should be very similar to the training procedure you used for the SVM and Softmax classifiers. You will also have to implement `TwoLayerNet.predict`, as the training process periodically performs prediction to keep track of accuracy over time while the network trains.
Once you have implemented the method, run the code below to train a two-layer network on toy data. You should achieve a training loss less than 0.2.
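The core of `train` is a loop of minibatch sampling, one loss/gradient evaluation, and a parameter update, while `predict` is just an argmax over the scores. A compressed sketch of a single iteration (variable names such as `batch_size` and `learning_rate` follow the arguments used in the calls below):
```
# Sketch of one plain-SGD iteration; the real train() also tracks loss/accuracy
# history and applies learning-rate decay at the end of every epoch.
batch_idx = np.random.choice(X.shape[0], batch_size, replace=True)
X_batch, y_batch = X[batch_idx], y[batch_idx]

loss, grads = net.loss(X_batch, y_batch, reg=reg)
for name in net.params:
    net.params[name] -= learning_rate * grads[name]

# predict(): forward pass without labels, then take the highest-scoring class
y_pred = np.argmax(net.loss(X_batch), axis=1)
```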
```
net = init_toy_model()
stats = net.train(X, y, X, y,
learning_rate=1e-1, reg=1e-5,
num_iters=100, verbose=False)
print('Final training loss: ', stats['loss_history'][-1])
# plot the loss history
plt.plot(stats['loss_history'])
plt.xlabel('iteration')
plt.ylabel('training loss')
plt.title('Training Loss history')
plt.show()
```
# Load the data
Now that you have implemented a two-layer network that passes gradient checks and works on toy data, it's time to load up our favorite CIFAR-10 data so we can use it to train a classifier on a real dataset.
```
from cs231n.data_utils import load_CIFAR10
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
"""
Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
it for the two-layer neural net classifier. These are the same steps as
we used for the SVM, but condensed to a single function.
"""
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
# Normalize the data: subtract the mean image
mean_image = np.mean(X_train, axis=0)
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
# Reshape data to rows
X_train = X_train.reshape(num_training, -1)
X_val = X_val.reshape(num_validation, -1)
X_test = X_test.reshape(num_test, -1)
return X_train, y_train, X_val, y_val, X_test, y_test
# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
```
# Train a network
To train our network we will use SGD with momentum. In addition, we will adjust the learning rate with an exponential learning rate schedule as optimization proceeds; after each epoch, we will reduce the learning rate by multiplying it by a decay rate.
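In update-rule form, momentum keeps a running velocity for each parameter and the schedule multiplies the learning rate by the decay factor once per epoch. A sketch (`velocity` and `mu` are illustrative names, not part of the `TwoLayerNet` API):
```
# Sketch of SGD with momentum plus exponential learning-rate decay
velocity = {name: np.zeros_like(p) for name, p in net.params.items()}
mu = 0.9                                   # momentum coefficient (assumed value)

def momentum_step(grads, learning_rate):
    for name in net.params:
        velocity[name] = mu * velocity[name] - learning_rate * grads[name]
        net.params[name] += velocity[name]

# once per epoch:
# learning_rate *= learning_rate_decay     # e.g. 0.95, as in the call below
```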
```
input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)
# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
num_iters=1000, batch_size=200,
learning_rate=1e-4, learning_rate_decay=0.95,
reg=0.5, verbose=True)
# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
print('Validation accuracy: ', val_acc)
```
# Debug the training
With the default parameters we provided above, you should get an accuracy of about 0.29 on the validation set. This isn't very good.
One strategy for getting insight into what's wrong is to plot the loss function and the accuracies on the training and validation sets during optimization.
Another strategy is to visualize the weights that were learned in the first layer of the network. In most neural networks trained on visual data, the first layer weights typically show some visible structure when visualized.
```
# Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
plt.plot(stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.subplot(2, 1, 2)
plt.plot(stats['train_acc_history'], label='train')
plt.plot(stats['val_acc_history'], label='val')
plt.title('Classification accuracy history')
plt.xlabel('Epoch')
plt.ylabel('Classification accuracy')
plt.show()
from cs231n.vis_utils import visualize_grid
# Visualize the weights of the network
def show_net_weights(net):
W1 = net.params['W1']
W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)
plt.imshow(visualize_grid(W1, padding=3).astype('uint8'))
plt.gca().axis('off')
plt.show()
show_net_weights(net)
```
# Tune your hyperparameters
**What's wrong?** Looking at the visualizations above, we see that the loss is decreasing more or less linearly, which seems to suggest that the learning rate may be too low. Moreover, there is no gap between the training and validation accuracy, suggesting that the model we used has low capacity and that we should increase its size. On the other hand, with a very large model we would expect to see more overfitting, which would manifest itself as a very large gap between the training and validation accuracy.
**Tuning**. Tuning the hyperparameters and developing intuition for how they affect the final performance is a large part of using Neural Networks, so we want you to get a lot of practice. Below, you should experiment with different values of the various hyperparameters, including hidden layer size, learning rate, number of training epochs, and regularization strength. You might also consider tuning the learning rate decay, but you should be able to get good performance using the default value.
**Approximate results**. You should aim to achieve a classification accuracy of greater than 48% on the validation set. Our best network gets over 52% on the validation set.
**Experiment**: Your goal in this exercise is to get as good a result on CIFAR-10 as you can with a fully-connected neural network. For every 1% above 52% on the test set we will award you one extra bonus point. Feel free to implement your own techniques (e.g. PCA to reduce dimensionality, adding dropout, or adding features to the solver, etc.).
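One straightforward way to automate the sweep hinted at above is a small random search over learning rate, regularization strength and hidden-layer size, keeping whichever model does best on the validation set. A sketch (the ranges and number of trials are illustrative guesses, not values prescribed by the assignment):
```
# Sketch of a random hyperparameter search (ranges are illustrative)
best_val_acc, best_candidate = -1.0, None
for _ in range(20):
    lr = 10 ** np.random.uniform(-4.0, -2.5)
    reg_strength = 10 ** np.random.uniform(-5.0, -2.0)
    hidden = int(np.random.choice([50, 100, 150]))

    candidate = TwoLayerNet(input_size, hidden, num_classes)
    candidate.train(X_train, y_train, X_val, y_val,
                    num_iters=1500, batch_size=200,
                    learning_rate=lr, learning_rate_decay=0.95,
                    reg=reg_strength, verbose=False)

    val_acc = (candidate.predict(X_val) == y_val).mean()
    if val_acc > best_val_acc:
        best_val_acc, best_candidate = val_acc, candidate

print('Best validation accuracy found:', best_val_acc)
```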
```
best_net = None # store the best model into this
#################################################################################
# TODO: Tune hyperparameters using the validation set. Store your best trained #
# model in best_net. #
# #
# To help debug your network, it may help to use visualizations similar to the #
# ones we used above; these visualizations will have significant qualitative #
# differences from the ones we saw above for the poorly tuned network. #
# #
# Tweaking hyperparameters by hand can be fun, but you might find it useful to #
# write code to sweep through possible combinations of hyperparameters #
# automatically like we did on the previous exercises. #
#################################################################################
pass
#################################################################################
# END OF YOUR CODE #
#################################################################################
# visualize the weights of the best network
show_net_weights(best_net)
```
# Run on the test set
When you are done experimenting, you should evaluate your final trained network on the test set; you should get above 48%.
**We will give you extra bonus point for every 1% of accuracy above 52%.**
```
test_acc = (best_net.predict(X_test) == y_test).mean()
print('Test accuracy: ', test_acc)
```
| github_jupyter |