repo_name | path | copies | size | content | license
---|---|---|---|---|---
glennq/scikit-learn | benchmarks/bench_plot_randomized_svd.py | 56 | 17557 | """
Benchmarks on the power iterations phase in randomized SVD.
We test on various synthetic and real datasets the effect of increasing
the number of power iterations in terms of quality of approximation
and running time. A number greater than 0 should help with noisy matrices,
which are characterized by a slow spectral decay.
We test several policies for normalizing the power iterations. Normalization
is crucial to avoid numerical issues.
The quality of the approximation is measured by the spectral norm discrepancy
between the original input matrix and the reconstructed one (by multiplying
the randomized_svd's outputs). The spectral norm is equal to the largest
singular value of a matrix; (3) justifies this choice. However, one can
notice in these experiments that the Frobenius and spectral norms behave
very similarly in a qualitative sense. Therefore, we suggest running these
benchmarks with `enable_spectral_norm = False`, as the Frobenius norm is MUCH
faster to compute.
The benchmarks follow.
(a) plot: time vs norm, varying number of power iterations
data: many datasets
goal: compare normalization policies and study how the number of power
iterations affect time and norm
(b) plot: n_iter vs norm, varying rank of data and number of components for
randomized_SVD
data: low-rank matrices on which we control the rank
goal: study whether the rank of the matrix and the number of components
extracted by randomized SVD affect "the optimal" number of power iterations
(c) plot: time vs norm, varying datasets
data: many datasets
goal: compare default configurations
We compare the following algorithms:
- randomized_svd(..., power_iteration_normalizer='none')
- randomized_svd(..., power_iteration_normalizer='LU')
- randomized_svd(..., power_iteration_normalizer='QR')
- randomized_svd(..., power_iteration_normalizer='auto')
- fbpca.pca() from https://github.com/facebook/fbpca (if installed)
Conclusion
----------
- n_iter=2 appears to be a good default value
- power_iteration_normalizer='none' is OK if n_iter is small, otherwise LU
gives similar errors to QR but is cheaper. That's what 'auto' implements.
References
----------
(1) Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
(2) A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
(3) An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
# Author: Giorgio Patrini
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import gc
import pickle
from time import time
from collections import defaultdict
import os.path
from sklearn.utils import gen_batches
from sklearn.utils.validation import check_random_state
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import (make_low_rank_matrix,
make_sparse_uncorrelated)
from sklearn.datasets import (fetch_lfw_people,
fetch_mldata,
fetch_20newsgroups_vectorized,
fetch_olivetti_faces,
fetch_rcv1)
try:
import fbpca
fbpca_available = True
except ImportError:
fbpca_available = False
# If this is enabled, tests are much slower and will crash with the large data
enable_spectral_norm = False
# TODO: compute approximate spectral norms with the power method as in
# Estimating the largest eigenvalues by the power and Lanczos methods with
# a random start, Jacek Kuczynski and Henryk Wozniakowski, SIAM Journal on
# Matrix Analysis and Applications, 13 (4): 1094-1122, 1992.
# This approximation is a very fast estimate of the spectral norm, but depends
# on starting random vectors.
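# An illustrative sketch of that power-method estimate (hypothetical helper,
# not used by this benchmark): repeatedly apply A.T.dot(A) to a random unit
# vector; the norm of A.dot(v) then approximates the largest singular value
# for a generic random start. Works on dense arrays; sparse matrices expose
# the same .dot interface.
def _approx_spectral_norm(A, n_iter=20, random_state=0):
    import numpy as np
    rng = np.random.RandomState(random_state)
    v = rng.normal(size=A.shape[1])
    v /= np.linalg.norm(v)
    for _ in range(n_iter):
        # One power iteration on A.T * A, renormalized to avoid overflow
        v = A.T.dot(A.dot(v))
        v /= np.linalg.norm(v)
    return np.linalg.norm(A.dot(v))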
# Determine when to switch to batch computation for matrix norms,
# in case the reconstructed (dense) matrix is too large
MAX_MEMORY = np.int(2e9)
# The following datasets can be downloaded manually from:
# CIFAR 10: http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
# SVHN: http://ufldl.stanford.edu/housenumbers/train_32x32.mat
CIFAR_FOLDER = "./cifar-10-batches-py/"
SVHN_FOLDER = "./SVHN/"
datasets = ['low rank matrix', 'lfw_people', 'olivetti_faces', '20newsgroups',
'MNIST original', 'CIFAR', 'a1a', 'SVHN', 'uncorrelated matrix']
big_sparse_datasets = ['big sparse matrix', 'rcv1']
def unpickle(file_name):
with open(file_name, 'rb') as fo:
return pickle.load(fo, encoding='latin1')["data"]
def handle_missing_dataset(file_folder):
if not os.path.isdir(file_folder):
print("%s file folder not found. Test skipped." % file_folder)
return 0
def get_data(dataset_name):
print("Getting dataset: %s" % dataset_name)
if dataset_name == 'lfw_people':
X = fetch_lfw_people().data
elif dataset_name == '20newsgroups':
X = fetch_20newsgroups_vectorized().data[:, :100000]
elif dataset_name == 'olivetti_faces':
X = fetch_olivetti_faces().data
elif dataset_name == 'rcv1':
X = fetch_rcv1().data
elif dataset_name == 'CIFAR':
        if handle_missing_dataset(CIFAR_FOLDER) == 0:
return
X1 = [unpickle("%sdata_batch_%d" % (CIFAR_FOLDER, i + 1))
for i in range(5)]
X = np.vstack(X1)
del X1
elif dataset_name == 'SVHN':
if handle_missing_dataset(SVHN_FOLDER) == 0:
return
X1 = sp.io.loadmat("%strain_32x32.mat" % SVHN_FOLDER)['X']
X2 = [X1[:, :, :, i].reshape(32 * 32 * 3) for i in range(X1.shape[3])]
X = np.vstack(X2)
del X1
del X2
elif dataset_name == 'low rank matrix':
X = make_low_rank_matrix(n_samples=500, n_features=np.int(1e4),
effective_rank=100, tail_strength=.5,
random_state=random_state)
elif dataset_name == 'uncorrelated matrix':
X, _ = make_sparse_uncorrelated(n_samples=500, n_features=10000,
random_state=random_state)
elif dataset_name == 'big sparse matrix':
sparsity = np.int(1e6)
size = np.int(1e6)
small_size = np.int(1e4)
data = np.random.normal(0, 1, np.int(sparsity/10))
data = np.repeat(data, 10)
        row = np.random.randint(0, small_size, size=sparsity)
        col = np.random.randint(0, small_size, size=sparsity)
X = sp.sparse.csr_matrix((data, (row, col)), shape=(size, small_size))
del data
del row
del col
else:
X = fetch_mldata(dataset_name).data
return X
def plot_time_vs_s(time, norm, point_labels, title):
plt.figure()
colors = ['g', 'b', 'y']
for i, l in enumerate(sorted(norm.keys())):
if l is not "fbpca":
plt.plot(time[l], norm[l], label=l, marker='o', c=colors.pop())
else:
plt.plot(time[l], norm[l], label=l, marker='^', c='red')
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -20),
textcoords='offset points', ha='right', va='bottom')
plt.legend(loc="upper right")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def scatter_time_vs_s(time, norm, point_labels, title):
plt.figure()
size = 100
for i, l in enumerate(sorted(norm.keys())):
if l is not "fbpca":
plt.scatter(time[l], norm[l], label=l, marker='o', c='b', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -80),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
else:
plt.scatter(time[l], norm[l], label=l, marker='^', c='red', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, 30),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
plt.legend(loc="best")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def plot_power_iter_vs_s(power_iter, s, title):
plt.figure()
for l in sorted(s.keys()):
plt.plot(power_iter, s[l], label=l, marker='o')
plt.legend(loc="lower right", prop={'size': 10})
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("n_iter")
def svd_timing(X, n_comps, n_iter, n_oversamples,
power_iteration_normalizer='auto', method=None):
"""
Measure time for decomposition
"""
print("... running SVD ...")
    if method != 'fbpca':
gc.collect()
t0 = time()
U, mu, V = randomized_svd(X, n_comps, n_oversamples, n_iter,
power_iteration_normalizer,
random_state=random_state, transpose=False)
call_time = time() - t0
else:
gc.collect()
t0 = time()
        # fbpca uses a different convention: l is the total number of random
        # projections, i.e. n_comps + n_oversamples
U, mu, V = fbpca.pca(X, n_comps, raw=True, n_iter=n_iter,
l=n_oversamples+n_comps)
call_time = time() - t0
return U, mu, V, call_time
def norm_diff(A, norm=2, msg=True):
"""
    Compute a norm of A, where A is typically the difference between an
    original matrix and its reconstruction from a randomized SVD.
norm: 2 => spectral; 'fro' => Frobenius
"""
if msg:
print("... computing %s norm ..." % norm)
if norm == 2:
# s = sp.linalg.norm(A, ord=2) # slow
value = sp.sparse.linalg.svds(A, k=1, return_singular_vectors=False)
else:
if sp.sparse.issparse(A):
value = sp.sparse.linalg.norm(A, ord=norm)
else:
value = sp.linalg.norm(A, ord=norm)
return value
def scalable_frobenius_norm_discrepancy(X, U, s, V):
# if the input is not too big, just call scipy
if X.shape[0] * X.shape[1] < MAX_MEMORY:
A = X - U.dot(np.diag(s).dot(V))
return norm_diff(A, norm='fro')
print("... computing fro norm by batches...")
batch_size = 1000
Vhat = np.diag(s).dot(V)
cum_norm = .0
for batch in gen_batches(X.shape[0], batch_size):
        M = X[batch, :] - U[batch, :].dot(Vhat)
        # The squared Frobenius norm is additive over row blocks
        # (see the illustrative self-check after this function).
        cum_norm += norm_diff(M, norm='fro', msg=False) ** 2
return np.sqrt(cum_norm)
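# Why accumulating squared batch norms is valid: for a row-wise split of a
# matrix A into blocks A_1, ..., A_k, ||A||_F^2 = sum_i ||A_i||_F^2. The
# helper below is a tiny illustrative self-check only, not part of the
# original benchmark.
def _check_blockwise_frobenius():
    import numpy as np
    A = np.arange(12.).reshape(4, 3)
    blockwise = np.sqrt(sum(np.linalg.norm(A[i:i + 2], 'fro') ** 2
                            for i in range(0, 4, 2)))
    return np.isclose(blockwise, np.linalg.norm(A, 'fro'))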
def bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
X_spectral_norm = norm_diff(X, norm=2, msg=False)
all_frobenius = defaultdict(list)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for pi in power_iter:
for pm in ['none', 'LU', 'QR']:
print("n_iter = %d on sklearn - %s" % (pi, pm))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples)
label = "sklearn - %s" % pm
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
print("n_iter = %d on fbca" % (pi))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples,
method='fbpca')
label = "fbpca"
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_spectral, power_iter, title)
title = "%s: Frobenius norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_frobenius, power_iter, title)
def bench_b(power_list):
n_samples, n_features = 1000, 10000
data_params = {'n_samples': n_samples, 'n_features': n_features,
'tail_strength': .7, 'random_state': random_state}
dataset_name = "low rank matrix %d x %d" % (n_samples, n_features)
ranks = [10, 50, 100]
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for rank in ranks:
X = make_low_rank_matrix(effective_rank=rank, **data_params)
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for n_comp in [np.int(rank/2), rank, rank*2]:
label = "rank=%d, n_comp=%d" % (rank, n_comp)
print(label)
for pi in power_list:
U, s, V, _ = svd_timing(X, n_comp, n_iter=pi, n_oversamples=2,
power_iteration_normalizer='LU')
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs n power iteration" % (dataset_name)
        plot_power_iter_vs_s(power_list, all_spectral, title)
title = "%s: Frobenius norm diff vs n power iteration" % (dataset_name)
    plot_power_iter_vs_s(power_list, all_frobenius, title)
def bench_c(datasets, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
n_comps = np.minimum(n_comps, np.min(X.shape))
label = "sklearn"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=10,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
label = "fbpca"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=2,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if len(all_time) == 0:
raise ValueError("No tests ran. Aborting.")
if enable_spectral_norm:
title = "normalized spectral norm diff vs running time"
scatter_time_vs_s(all_time, all_spectral, datasets, title)
title = "normalized Frobenius norm diff vs running time"
scatter_time_vs_s(all_time, all_frobenius, datasets, title)
if __name__ == '__main__':
random_state = check_random_state(1234)
power_iter = np.linspace(0, 6, 7, dtype=int)
n_comps = 50
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
print(" >>>>>> Benching sklearn and fbpca on %s %d x %d" %
(dataset_name, X.shape[0], X.shape[1]))
bench_a(X, dataset_name, power_iter, n_oversamples=2,
n_comps=np.minimum(n_comps, np.min(X.shape)))
print(" >>>>>> Benching on simulated low rank matrix with variable rank")
bench_b(power_iter)
print(" >>>>>> Benching sklearn and fbpca default configurations")
bench_c(datasets + big_sparse_datasets, n_comps)
plt.show()
| bsd-3-clause |
pianomania/scikit-learn | examples/svm/plot_svm_regression.py | 118 | 1520 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
lw = 2
plt.scatter(X, y, color='darkorange', label='data')
plt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')
plt.plot(X, y_lin, color='c', lw=lw, label='Linear model')
plt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
pianomania/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 43 | 26651 | """
Todo: cross-check the F-value with statsmodels
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from numpy.testing import run_module_suite
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (
chi2, f_classif, f_oneway, f_regression, mutual_info_classif,
mutual_info_regression, SelectPercentile, SelectKBest, SelectFpr,
SelectFdr, SelectFwe, GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: check that it does not raise casting
    # errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# with centering, compare with sparse
F, pv = f_regression(X, y, center=True)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=True)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
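# The module docstring asks for a cross-check of the F-value against an
# external reference. The sketch below is illustrative only (the helper name
# is hypothetical, not an existing test): with centering, the univariate F
# statistic equals r ** 2 / (1 - r ** 2) * (n - 2), where r is the Pearson
# correlation between a feature and the target.
def _crosscheck_f_regression_with_pearson():
    import numpy as np
    from scipy import stats
    from sklearn.feature_selection import f_regression

    rng = np.random.RandomState(0)
    X = rng.rand(50, 3)
    y = rng.rand(50)
    F, _ = f_regression(X, y, center=True)
    r = np.array([stats.pearsonr(X[:, j], y)[0] for j in range(X.shape[1])])
    F_expected = r ** 2 / (1 - r ** 2) * (X.shape[0] - 2)
    return np.allclose(F, F_expected)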
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_boundary_case_ch2():
# Test boundary case, and always aim to select 1 feature.
X = np.array([[10, 20], [20, 20], [20, 30]])
y = np.array([[1], [0], [0]])
scores, pvalues = chi2(X, y)
assert_array_almost_equal(scores, np.array([4., 0.71428571]))
assert_array_almost_equal(pvalues, np.array([0.04550026, 0.39802472]))
filter_fdr = SelectFdr(chi2, alpha=0.1)
filter_fdr.fit(X, y)
support_fdr = filter_fdr.get_support()
assert_array_equal(support_fdr, np.array([True, False]))
filter_kbest = SelectKBest(chi2, k=1)
filter_kbest.fit(X, y)
support_kbest = filter_kbest.get_support()
assert_array_equal(support_kbest, np.array([True, False]))
filter_percentile = SelectPercentile(chi2, percentile=50)
filter_percentile.fit(X, y)
support_percentile = filter_percentile.get_support()
assert_array_equal(support_percentile, np.array([True, False]))
filter_fpr = SelectFpr(chi2, alpha=0.1)
filter_fpr.fit(X, y)
support_fpr = filter_fpr.get_support()
assert_array_equal(support_fpr, np.array([True, False]))
filter_fwe = SelectFwe(chi2, alpha=0.1)
filter_fwe.fit(X, y)
support_fwe = filter_fwe.get_support()
assert_array_equal(support_fwe, np.array([True, False]))
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
            # As per Benjamini-Hochberg, the expected false discovery rate
            # should be lower than alpha:
            # FDR = E(FP / (TP + FP)) <= alpha
            # (a standalone sketch of the selection rule follows this test)
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(100)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
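# The Benjamini-Hochberg step-up rule referenced in the comments above, as a
# standalone illustrative sketch (hypothetical helper, not sklearn's own
# implementation): sort the p-values, find the largest k such that
# p_(k) <= alpha * k / m, and keep every feature whose p-value is at most
# that threshold.
def _benjamini_hochberg_select(p_values, alpha):
    import numpy as np
    p_values = np.asarray(p_values)
    m = len(p_values)
    sorted_p = np.sort(p_values)
    below = sorted_p <= alpha * (np.arange(m) + 1) / m
    if not below.any():
        return np.zeros(m, dtype=bool)
    threshold = sorted_p[np.nonzero(below)[0].max()]
    return p_values <= threshold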
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_scorefunc_multilabel():
# Test whether k-best and percentiles works with multilabels with chi2.
X = np.array([[10000, 9999, 0], [100, 9999, 0], [1000, 99, 0]])
y = [[1, 1], [0, 1], [1, 0]]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (3, 2))
assert_not_in(0, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (3, 2))
assert_not_in(0, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
    # Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
def test_mutual_info_classif():
X, y = make_classification(n_samples=100, n_features=5,
n_informative=1, n_redundant=1,
n_repeated=0, n_classes=2,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_classif, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_classif, percentile=40)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='percentile', param=40).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
def test_mutual_info_regression():
X, y = make_regression(n_samples=100, n_features=10, n_informative=2,
shuffle=False, random_state=0, noise=10)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_regression, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
mutual_info_regression, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_regression, percentile=20)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(mutual_info_regression, mode='percentile',
param=20).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
giorgiop/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 55 | 9939 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <florian.wilhelm@gmail.com>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import (
assert_almost_equal, assert_greater, assert_less, raises,
)
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
with open(os.devnull, 'w') as devnull:
sys.stdout = devnull
sys.stderr = devnull
yield
devnull.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
    # Check that the start value is an element of X and already the solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
    # Check that a warning is emitted when the maximum number of iterations is exceeded
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1 / (np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
    # Check for exactly the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
theoryno3/scikit-learn | examples/linear_model/plot_logistic.py | 309 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# This is our test set: just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
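# A short optional check (a hedged addition, not part of the original example):
# the fitted logistic curve crosses P=0.5 where the linear predictor is zero,
# i.e. at x = -intercept_ / coef_, which is the decision boundary drawn above.
decision_boundary = -clf.intercept_[0] / clf.coef_[0, 0]
print("Logistic decision boundary (P=0.5) at x = %.3f" % decision_boundary)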
| bsd-3-clause |
PatrickOReilly/scikit-learn | sklearn/setup.py | 23 | 3025 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
# submodules with build utilities
config.add_subpackage('__check_build')
config.add_subpackage('_build_utils')
# submodules which do not have their own setup.py
# we must manually add sub-submodules & tests
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('cross_decomposition/tests')
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('model_selection')
config.add_subpackage('model_selection/tests')
config.add_subpackage('neural_network')
config.add_subpackage('neural_network/tests')
config.add_subpackage('preprocessing')
config.add_subpackage('preprocessing/tests')
config.add_subpackage('semi_supervised')
config.add_subpackage('semi_supervised/tests')
# submodules which have their own setup.py
# leave out "linear_model" and "utils" for now; add them after cblas below
config.add_subpackage('cluster')
config.add_subpackage('datasets')
config.add_subpackage('decomposition')
config.add_subpackage('ensemble')
config.add_subpackage('externals')
config.add_subpackage('feature_extraction')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('metrics/cluster')
config.add_subpackage('neighbors')
config.add_subpackage('tree')
config.add_subpackage('svm')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
    # some libs need cblas; a Fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
    # the following packages depend on cblas, so they have to be built
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
ray-project/ray | rllib/examples/bare_metal_policy_with_custom_view_reqs.py | 1 | 2682 | import argparse
import os
import ray
from ray.rllib.algorithms.algorithm import Algorithm
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
from ray.rllib.examples.policy.bare_metal_policy_with_custom_view_reqs import (
BareMetalPolicyWithCustomViewReqs,
)
from ray import air, tune
def get_cli_args():
"""Create CLI parser and return parsed arguments"""
parser = argparse.ArgumentParser()
# general args
parser.add_argument(
"--run", default="PPO", help="The RLlib-registered algorithm to use."
)
parser.add_argument("--num-cpus", type=int, default=3)
parser.add_argument(
"--stop-iters", type=int, default=200, help="Number of iterations to train."
)
parser.add_argument(
"--stop-timesteps",
type=int,
default=100000,
help="Number of timesteps to train.",
)
parser.add_argument(
"--stop-reward",
type=float,
default=80.0,
help="Reward at which we stop training.",
)
parser.add_argument(
"--local-mode",
action="store_true",
help="Init Ray in local mode for easier debugging.",
)
args = parser.parse_args()
print(f"Running with following CLI args: {args}")
return args
if __name__ == "__main__":
args = get_cli_args()
ray.init(num_cpus=args.num_cpus or None, local_mode=args.local_mode)
    # Create a custom Algorithm class using our custom Policy.
class BareMetalPolicyAlgorithm(Algorithm):
@classmethod
def get_default_policy_class(cls, config):
return BareMetalPolicyWithCustomViewReqs
config = (
AlgorithmConfig()
.environment("CartPole-v1")
.rollouts(num_rollout_workers=1, create_env_on_local_worker=True)
.training(
model={
# Necessary to get the whole trajectory of 'state_in_0' in the
# sample batch.
"max_seq_len": 1,
}
)
.debugging(log_level="DEBUG")
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
.resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0")))
)
# NOTE: Does this have consequences?
    # I use it to avoid loading tensorflow/pytorch.
config.framework_str = None
stop = {
"training_iteration": args.stop_iters,
"timesteps_total": args.stop_timesteps,
"episode_reward_mean": args.stop_reward,
}
# Train the Algorithm with our policy.
results = tune.Tuner(
BareMetalPolicyAlgorithm,
param_space=config,
run_config=air.RunConfig(stop=stop),
).fit()
print(results.get_best_result())
| apache-2.0 |
sherpa/sherpa | sherpa/tests/test_data.py | 2 | 87392 | #
# Copyright (C) 2019, 2020, 2021, 2022
# Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import logging
import re
import warnings
import numpy
import pytest
from sherpa.data import Data, Data1D, DataSimulFit, Data1DInt, \
Data2D, Data2DInt, BaseData, IntegratedDataSpace2D, Filter
from sherpa.models import Polynom1D, Polynom2D
from sherpa.utils.err import NotImplementedErr, DataErr
from sherpa.ui.utils import Session
from sherpa.astro.ui.utils import Session as AstroSession
NAME = "data_test"
X_ARRAY = numpy.arange(0, 10, 1)
Y_ARRAY = numpy.arange(100, 110, 1)
X0_2D_RAW, X1_2D_RAW = numpy.meshgrid(X_ARRAY, X_ARRAY)
Y_2D_RAW = X0_2D_RAW + X1_2D_RAW
Y_2D = Y_2D_RAW.ravel()
X0_2D, X1_2D = X0_2D_RAW.ravel(), X1_2D_RAW.ravel()
SHAPE_2D = X_ARRAY.size, X_ARRAY.size
SYSTEMATIC_ERROR_ARRAY = numpy.arange(0, 0.10, 0.01)
STATISTICAL_ERROR_ARRAY = numpy.arange(0, 1, 0.1)
SYS_ERROR_2D = Y_2D / 10
STAT_ERROR_2D = Y_2D / 5
X_THRESHOLD = 3
MULTIPLIER = 2
# Make sure we don't change these accidentally by changing an object's values
X_ARRAY.setflags(write=False)
Y_ARRAY.setflags(write=False)
SYSTEMATIC_ERROR_ARRAY.setflags(write=False)
STATISTICAL_ERROR_ARRAY.setflags(write=False)
X0_2D.setflags(write=False)
X1_2D.setflags(write=False)
Y_2D.setflags(write=False)
SYS_ERROR_2D.setflags(write=False)
STAT_ERROR_2D.setflags(write=False)
DATA_1D_CLASSES = (Data1D, Data1DInt)
DATA_2D_CLASSES = (Data2D, Data2DInt)
ALL_DATA_CLASSES = DATA_1D_CLASSES + DATA_2D_CLASSES
REALLY_ALL_DATA_CLASSES = (Data, ) + ALL_DATA_CLASSES
DATA_ARGS = NAME, (X_ARRAY,), Y_ARRAY, STATISTICAL_ERROR_ARRAY, SYSTEMATIC_ERROR_ARRAY
DATA1D_ARGS = NAME, X_ARRAY, Y_ARRAY, STATISTICAL_ERROR_ARRAY, SYSTEMATIC_ERROR_ARRAY
DATA1DINT_ARGS = NAME, X_ARRAY - 0.5, X_ARRAY + 0.5, Y_ARRAY, STATISTICAL_ERROR_ARRAY, SYSTEMATIC_ERROR_ARRAY
DATA2D_ARGS = NAME, X0_2D, X1_2D, Y_2D, SHAPE_2D, STAT_ERROR_2D, SYS_ERROR_2D
DATA2DINT_ARGS = NAME, X0_2D - 0.5, X1_2D - 0.5, X0_2D + 0.5, X1_2D + 0.5, Y_2D, SHAPE_2D, STAT_ERROR_2D, SYS_ERROR_2D
EMPTY_DATA_OBJECTS = [(Data1D, [None] * 2),
(Data1DInt, [None] * 3),
(Data2D, [None] * 3),
(Data2DInt, [None] * 5)]
INSTANCE_ARGS = {
Data1D: DATA1D_ARGS,
Data: DATA_ARGS,
Data1DInt: DATA1DINT_ARGS,
Data2D: DATA2D_ARGS,
Data2DInt: DATA2DINT_ARGS
}
POS_X_ARRAY = {
Data1D: 1,
Data: 1,
Data1DInt: 1,
Data2D: 1,
Data2DInt: 1,
}
POS_Y_ARRAY = {
Data1D: 2,
Data: 2,
Data1DInt: 3,
Data2D: 3,
Data2DInt: 5,
}
POS_STATERR_ARRAY = {
Data1D: 3,
Data: 3,
Data1DInt: 4,
Data2D: 5,
Data2DInt: 7,
}
POS_SYSERR_ARRAY = {
Data1D: 4,
Data: 4,
Data1DInt: 5,
Data2D: 6,
Data2DInt: 8,
}
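# The POS_* tables above give, for each data class, the index of a particular
# array inside INSTANCE_ARGS. A small illustrative helper (a hedged sketch,
# not used by the tests themselves) shows the intended lookup pattern; for
# example _pos_lookup(Data2DInt, POS_Y_ARRAY) returns Y_2D.
def _pos_lookup(data_class, position_table):
    return INSTANCE_ARGS[data_class][position_table[data_class]]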
@pytest.fixture
def data(request):
data_class = request.param
return data_class(*INSTANCE_ARGS[data_class])
@pytest.fixture
def data_copy(request):
data_class = request.param
# At present we allow the fields of the data object to use
# the input arguments, rather than copying them. As the
# arguments in INSTANCE_ARGS have been marked read-only we
# need to explicitly copy them to make them writeable for
# those tests where we want to change the elements.
#
args = list(INSTANCE_ARGS[data_class])
for i in range(1, len(args)):
try:
args[i] = args[i].copy()
except AttributeError:
pass
return data_class(*args)
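# A minimal sketch of the aliasing behaviour described in the fixture above
# (hedged; not a test, hence no test_ prefix). Because no copy is taken,
# mutating the input array is visible through the data object's fields.
def _illustrate_field_aliasing():
    x = numpy.arange(3.0)
    d = Data1D("alias-demo", x, 2 * x)
    x[0] = 99.0
    assert d.x[0] == 99.0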
@pytest.fixture
def data_no_errors(request):
data_class = request.param
# Use the normal arguments but remove the error values
all_args = INSTANCE_ARGS[data_class]
no_errors = all_args[:POS_STATERR_ARRAY[data_class]]
out = data_class(*no_errors)
assert out.staterror is None
assert out.syserror is None
return out
@pytest.fixture
def data_simul_fit():
data_one = Data1D("data_one", X_ARRAY, Y_ARRAY, STATISTICAL_ERROR_ARRAY, SYSTEMATIC_ERROR_ARRAY)
data_two = Data1D("data_two", MULTIPLIER * X_ARRAY, MULTIPLIER * Y_ARRAY,
MULTIPLIER * STATISTICAL_ERROR_ARRAY, MULTIPLIER * SYSTEMATIC_ERROR_ARRAY)
return DataSimulFit(NAME, (data_one, data_two))
@pytest.fixture
def data_simul_fit_no_errors():
data_one = Data1D("data_one", X_ARRAY, Y_ARRAY)
data_two = Data1D("data_two", MULTIPLIER * X_ARRAY, MULTIPLIER * Y_ARRAY)
return DataSimulFit(NAME, (data_one, data_two))
@pytest.fixture
def data_simul_fit_some_errors():
data_one = Data1D("data_one", X_ARRAY, Y_ARRAY, STATISTICAL_ERROR_ARRAY, SYSTEMATIC_ERROR_ARRAY)
data_two = Data1D("data_two", MULTIPLIER * X_ARRAY, MULTIPLIER * Y_ARRAY)
return DataSimulFit(NAME, (data_one, data_two))
@pytest.mark.xfail
def test_base_data_instantiation():
with pytest.raises(NotImplementedErr):
BaseData()
@pytest.mark.parametrize("data", (Data, Data2D, Data2DInt), indirect=True)
def test_data_get_x(data):
with pytest.raises(AttributeError):
data.get_x()
@pytest.mark.xfail
@pytest.mark.parametrize("data", (Data1DInt, ), indirect=True)
def test_data_get_x_special(data):
# XFAIL: These classes still provide get_x
with pytest.raises(AttributeError):
data.get_x()
@pytest.mark.parametrize("data", DATA_1D_CLASSES, indirect=True)
def test_data_get_x0(data):
with pytest.raises(AttributeError):
data.get_x0()
@pytest.fixture
def data_for_load_arrays(request):
data_class = request.param
from sherpa.astro.ui.utils import Session
session = Session()
data_args = INSTANCE_ARGS[data_class]
args = data_args + (data_class,)
data = data_class(*data_args)
return session, args, data
@pytest.mark.parametrize("data_for_load_arrays", ALL_DATA_CLASSES, indirect=True)
def test_load_arrays(data_for_load_arrays):
session, args, data = data_for_load_arrays
session.load_arrays(*args)
new_data = session.get_data(data.name)
assert new_data is not data
# DATA-NOTE: Do we need an equality operator for data classes? These tests are very partial
numpy.testing.assert_array_equal(new_data.get_indep(), data.get_indep())
numpy.testing.assert_array_equal(new_data.get_dep(), data.get_dep())
# DATA-NOTE: In the current Sherpa, Data cannot be correctly loaded using load_arrays
@pytest.mark.xfail
@pytest.mark.parametrize("data_for_load_arrays", (Data, ), indirect=True)
def test_load_arrays_data(data_for_load_arrays):
session, args, _ = data_for_load_arrays
session.load_arrays(*args)
@pytest.mark.parametrize("data_no_errors", ALL_DATA_CLASSES, indirect=True)
def test_load_arrays_no_errors(data_no_errors):
from sherpa.astro.ui.utils import Session
session = Session()
data = data_no_errors
data_class = data.__class__
data_args = INSTANCE_ARGS[data_class]
args = data_args + (data_class,)
session.load_arrays(*args)
new_data = session.get_data(data.name)
assert new_data is not data
# DATA-NOTE: Do we need an equality operator for data classes? These tests are very partial
# Note that when they are created with load_arrays they seem to lose the name, which becomes the ID
numpy.testing.assert_array_equal(new_data.get_indep(), data.get_indep())
numpy.testing.assert_array_equal(new_data.get_dep(), data.get_dep())
@pytest.mark.parametrize("data", DATA_1D_CLASSES, indirect=True)
def test_data_get_x1(data):
with pytest.raises(AttributeError):
data.get_x1()
@pytest.mark.parametrize("data", DATA_1D_CLASSES, indirect=True)
def test_data_get_xlabel(data):
assert data.get_xlabel() == "x"
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data_get_x0label(data):
assert data.get_x0label() == "x0"
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data_get_x1label(data):
assert data.get_x1label() == "x1"
@pytest.mark.parametrize("data", ALL_DATA_CLASSES, indirect=True)
def test_data_get_ylabel(data):
assert data.get_ylabel() == "y"
@pytest.mark.parametrize("data", (Data, ), indirect=True)
def test_data_get_dims(data):
assert data.get_dims() == ((X_ARRAY.size, ), X_ARRAY.size)
@pytest.mark.parametrize("data_class,args", EMPTY_DATA_OBJECTS)
def test_data_len_empty(data_class, args):
data = data_class("empty", *args)
assert len(data) == 0
@pytest.mark.parametrize("Dataclass", REALLY_ALL_DATA_CLASSES)
def test_data_len(Dataclass):
args = list(INSTANCE_ARGS[Dataclass])
data = Dataclass(*args)
size = args[POS_Y_ARRAY[Dataclass]].size
assert len(data) == size
@pytest.mark.parametrize("data", (Data, ), indirect=True)
def test_data_str_repr(data):
assert repr(data) == "<Data data set instance 'data_test'>"
assert str(data) == 'name = data_test\nindep = (array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),)\ndep ' \
'= Int64[10]\nstaterror = Float64[10]\nsyserror = Float64[10]'
@pytest.mark.parametrize("data", (Data1D, ), indirect=True)
def test_data1d_str_repr(data):
assert repr(data) == "<Data1D data set instance 'data_test'>"
assert str(data) == 'name = data_test\nx = Int64[10]\ny = Int64[10]\nstaterror = ' \
'Float64[10]\nsyserror = Float64[10]'
@pytest.mark.parametrize("data", (Data, Data1D), indirect=True)
def test_data_get_indep(data):
numpy.testing.assert_array_equal(data.get_indep(), [X_ARRAY, ])
@pytest.mark.parametrize("data", (Data1DInt, ), indirect=True)
def test_data_1d_int_get_indep(data):
numpy.testing.assert_array_equal(data.get_indep(), (X_ARRAY-0.5, X_ARRAY+0.5))
@pytest.mark.parametrize("data", (Data1D, Data), indirect=True)
def test_data_get_indep_filter(data):
data.mask = X_ARRAY <= X_THRESHOLD
numpy.testing.assert_array_equal(data.get_indep(filter=True), [X_ARRAY[:X_THRESHOLD + 1], ])
@pytest.mark.parametrize("data", (Data1DInt, ), indirect=True)
def test_data_1d_int_get_indep_filter(data):
data.mask = X_ARRAY <= X_THRESHOLD
expected = (X_ARRAY-0.5)[:X_THRESHOLD + 1], (X_ARRAY+0.5)[:X_THRESHOLD + 1]
numpy.testing.assert_array_equal(data.get_indep(filter=True), expected)
@pytest.mark.parametrize("data", (Data1D, ), indirect=True)
def test_data_1d_get_indep_ignore(data):
data.ignore(0, X_THRESHOLD)
numpy.testing.assert_array_equal(data.get_indep(filter=True), [X_ARRAY[X_THRESHOLD + 1:], ])
@pytest.mark.parametrize("data", (Data, ), indirect=True)
def test_data_get_indep_ignore(data):
data.ignore((0, ), (X_THRESHOLD, ))
numpy.testing.assert_array_equal(data.get_indep(filter=True), [X_ARRAY[X_THRESHOLD + 1:], ])
@pytest.mark.parametrize("data", (Data1D, ), indirect=True)
def test_data_1d_get_indep_ignore_string_lower(data):
with pytest.raises(DataErr):
data.ignore("0", 1)
@pytest.mark.parametrize("data", (Data, ), indirect=True)
def test_data_get_indep_ignore_string_lower(data):
with pytest.raises(DataErr):
data.ignore(("0", ), (1, ))
@pytest.mark.parametrize("data", (Data1D, ), indirect=True)
def test_data_1d_get_indep_ignore_string_upper(data):
with pytest.raises(DataErr):
data.ignore(0, "1")
@pytest.mark.parametrize("data", (Data, ), indirect=True)
def test_data_get_indep_ignore_string_upper(data):
with pytest.raises(DataErr):
data.ignore((0, ), ("1", ))
@pytest.mark.parametrize("data", (Data1D, ), indirect=True)
def test_data_1d_get_indep_notice(data):
data.notice(0, X_THRESHOLD)
numpy.testing.assert_array_equal(data.get_indep(filter=True), [X_ARRAY[:X_THRESHOLD + 1], ])
@pytest.mark.parametrize("data", (Data1DInt, ), indirect=True)
def test_data_1d_int_get_indep_notice(data):
data.notice(0, X_THRESHOLD)
expected = [(X_ARRAY-0.5)[:X_THRESHOLD + 1], (X_ARRAY+0.5)[:X_THRESHOLD + 1]]
actual = data.get_indep(filter=True)
numpy.testing.assert_array_equal(actual[0], expected[0])
numpy.testing.assert_array_equal(actual[1], expected[1])
@pytest.mark.parametrize("data", (Data, ), indirect=True)
def test_data_get_indep_notice(data):
data.notice((0, ), (X_THRESHOLD, ))
numpy.testing.assert_array_equal(data.get_indep(filter=True), [X_ARRAY[:X_THRESHOLD + 1], ])
@pytest.mark.parametrize("data", (Data1D, Data), indirect=True)
def test_data_get_indep_mask(data):
data.mask = X_ARRAY == 0
numpy.testing.assert_array_equal(data.get_indep(filter=True)[0], X_ARRAY[0])
@pytest.mark.parametrize("data", (Data1DInt, ), indirect=True)
def test_data_1d_int_get_indep_mask(data):
data.mask = X_ARRAY == 0
numpy.testing.assert_array_equal(data.get_indep(filter=True), ([(X_ARRAY-0.5)[0]], [(X_ARRAY+0.5)[0]]))
@pytest.mark.parametrize("data", (Data1D, Data), indirect=True)
def test_data_get_indep_filter_mask(data):
data.mask = X_ARRAY <= X_THRESHOLD
data.mask = X_ARRAY == 0
numpy.testing.assert_array_equal(data.get_indep(filter=True)[0], [X_ARRAY[0]])
@pytest.mark.parametrize("data", (Data1DInt, ), indirect=True)
def test_data_1d_int_get_indep_filter_mask(data):
data.mask = X_ARRAY <= X_THRESHOLD
data.mask = X_ARRAY == 0
numpy.testing.assert_array_equal(data.get_indep(filter=True), ([(X_ARRAY-0.5)[0]], [(X_ARRAY+0.5)[0]]))
@pytest.mark.parametrize("data", DATA_1D_CLASSES, indirect=True)
def test_data_get_dep_filter(data):
data.mask = X_ARRAY <= X_THRESHOLD
numpy.testing.assert_array_equal(data.get_dep(filter=True), Y_ARRAY[:X_THRESHOLD + 1])
@pytest.mark.parametrize("data", (Data1D, Data1DInt), indirect=True)
def test_data_set_dep_filter(data):
    # This used to be [0, 1], but it is not clear why we would want that,
    # so check that we can call set_dep with the expected argument size.
#
data.set_dep([0, 1] * 5)
numpy.testing.assert_array_equal(data.get_dep(filter=True), [0, 1] * 5)
# There's also support for scalar values.
data.set_dep(0)
numpy.testing.assert_array_equal(data.get_dep(filter=True), [0] * Y_ARRAY.size)
@pytest.mark.parametrize("data", DATA_1D_CLASSES, indirect=True)
def test_data_get_staterror(data):
numpy.testing.assert_array_equal(data.get_staterror(), STATISTICAL_ERROR_ARRAY)
@pytest.mark.parametrize("data", DATA_1D_CLASSES, indirect=True)
def test_data_get_staterror_filter(data):
data.mask = X_ARRAY <= X_THRESHOLD
numpy.testing.assert_array_equal(data.get_staterror(filter=True), STATISTICAL_ERROR_ARRAY[:X_THRESHOLD + 1])
@pytest.mark.parametrize("data_no_errors", DATA_1D_CLASSES, indirect=True)
def test_data_get_staterror_func(data_no_errors):
data_no_errors.mask = X_ARRAY <= X_THRESHOLD
stat_error = data_no_errors.get_staterror(filter=False, staterrfunc=lambda x: MULTIPLIER * x) # type: numpy.ndarray
numpy.testing.assert_array_equal(stat_error, MULTIPLIER * Y_ARRAY)
@pytest.mark.parametrize("data_no_errors", DATA_1D_CLASSES, indirect=True)
def test_data_get_staterror_filter_func(data_no_errors):
data_no_errors.mask = X_ARRAY <= X_THRESHOLD
stat_error = data_no_errors.get_staterror(filter=True, staterrfunc=lambda x: MULTIPLIER * x) # type: numpy.ndarray
numpy.testing.assert_array_equal(stat_error, MULTIPLIER * Y_ARRAY[:X_THRESHOLD + 1])
@pytest.mark.parametrize("data", DATA_1D_CLASSES, indirect=True)
def test_data_get_syserror(data):
numpy.testing.assert_array_equal(data.get_syserror(), SYSTEMATIC_ERROR_ARRAY)
@pytest.mark.parametrize("data", DATA_1D_CLASSES, indirect=True)
def test_data_get_syserror_filter(data):
data.mask = X_ARRAY <= X_THRESHOLD
numpy.testing.assert_array_equal(data.get_syserror(filter=True), SYSTEMATIC_ERROR_ARRAY[:X_THRESHOLD + 1])
@pytest.mark.parametrize("data", DATA_1D_CLASSES, indirect=True)
def test_data_get_error(data):
error = data.get_error()
expected_error = numpy.sqrt(SYSTEMATIC_ERROR_ARRAY ** 2 + STATISTICAL_ERROR_ARRAY ** 2)
numpy.testing.assert_array_equal(error, expected_error)
@pytest.mark.parametrize("data", DATA_1D_CLASSES, indirect=True)
def test_data_get_yerr(data):
error = data.get_yerr()
expected_error = numpy.sqrt(SYSTEMATIC_ERROR_ARRAY ** 2 + STATISTICAL_ERROR_ARRAY ** 2)
numpy.testing.assert_array_equal(error, expected_error)
@pytest.mark.parametrize("data", DATA_1D_CLASSES, indirect=True)
def test_data_get_dep(data):
numpy.testing.assert_array_equal(data.get_dep(), Y_ARRAY)
@pytest.mark.parametrize("data", DATA_1D_CLASSES, indirect=True)
def test_data_get_y(data):
numpy.testing.assert_array_equal(data.get_y(), Y_ARRAY)
@pytest.mark.parametrize("data", DATA_1D_CLASSES, indirect=True)
def test_data_get_y_filter(data):
data.mask = X_ARRAY <= X_THRESHOLD
numpy.testing.assert_array_equal(data.get_y(filter=True), Y_ARRAY[:X_THRESHOLD + 1])
@pytest.mark.parametrize("data", (Data1D, Data), indirect=True)
def test_data_get_y_filter_func(data):
data.mask = X_ARRAY <= X_THRESHOLD
y = data.get_y(filter=True, yfunc=lambda x: MULTIPLIER*x)
expected_y = (Y_ARRAY[:X_THRESHOLD + 1], MULTIPLIER*X_ARRAY[:X_THRESHOLD + 1])
numpy.testing.assert_array_equal(y[0], expected_y[0])
numpy.testing.assert_array_equal(y[1], expected_y[1])
@pytest.mark.parametrize("data", (Data1DInt, ), indirect=True)
def test_data_1d_int_get_y_filter_func(data):
data.mask = X_ARRAY <= X_THRESHOLD
y = data.get_y(filter=True, yfunc=lambda x, y: (MULTIPLIER*x, MULTIPLIER*y))
expected_y = (Y_ARRAY[:X_THRESHOLD + 1], (MULTIPLIER*(X_ARRAY-0.5)[:X_THRESHOLD + 1],
MULTIPLIER*(X_ARRAY+0.5)[:X_THRESHOLD + 1]))
numpy.testing.assert_array_equal(y[0], expected_y[0])
numpy.testing.assert_array_equal(y[1], expected_y[1])
@pytest.mark.parametrize("data", (Data1D, Data), indirect=True)
def test_data_get_y_func(data):
y = data.get_y(filter=True, yfunc=lambda x: MULTIPLIER*x)
expected_y = (Y_ARRAY, MULTIPLIER*X_ARRAY)
numpy.testing.assert_array_equal(y[0], expected_y[0])
numpy.testing.assert_array_equal(y[1], expected_y[1])
@pytest.mark.parametrize("data", (Data1DInt, ), indirect=True)
def test_data_1d_int_get_y_func(data):
y = data.get_y(filter=True, yfunc=lambda x, y: (MULTIPLIER*x, MULTIPLIER*y))
expected_y = (Y_ARRAY, (MULTIPLIER*(X_ARRAY-0.5), MULTIPLIER*(X_ARRAY+0.5)))
numpy.testing.assert_array_equal(y[0], expected_y[0])
numpy.testing.assert_array_equal(y[1], expected_y[1])
@pytest.mark.parametrize("data", DATA_1D_CLASSES, indirect=True)
def test_data_eval_model(data):
model = Polynom1D()
model.c0 = 0
model.c1 = MULTIPLIER
evaluated_data = data.eval_model(model)
numpy.testing.assert_array_equal(evaluated_data, MULTIPLIER * X_ARRAY)
@pytest.mark.parametrize("data", DATA_1D_CLASSES, indirect=True)
def test_data_eval_model_to_fit_no_filter(data):
model = Polynom1D()
model.c0 = 0
model.c1 = MULTIPLIER
evaluated_data = data.eval_model_to_fit(model)
numpy.testing.assert_array_equal(evaluated_data, MULTIPLIER * X_ARRAY)
@pytest.mark.parametrize("data", (Data1D, Data), indirect=True)
def test_data_eval_model_to_fit_filter(data):
model = Polynom1D()
model.c0 = 0
model.c1 = MULTIPLIER
data.mask = X_ARRAY <= X_THRESHOLD
evaluated_data = data.eval_model_to_fit(model)
numpy.testing.assert_array_equal(evaluated_data, MULTIPLIER * X_ARRAY[:X_THRESHOLD + 1])
@pytest.mark.parametrize("data", (Data1DInt, ), indirect=True)
def test_data_1d_int_eval_model_to_fit_filter(data):
model = Polynom1D()
model.c0 = 0
model.c1 = MULTIPLIER
data.mask = X_ARRAY <= X_THRESHOLD
evaluated_data = data.eval_model_to_fit(model)
numpy.testing.assert_array_equal(evaluated_data, MULTIPLIER * X_ARRAY[:X_THRESHOLD + 1])
@pytest.mark.parametrize("data", (Data1D, Data), indirect=True)
def test_data_to_guess(data):
actual = data.to_guess()
expected = [Y_ARRAY, X_ARRAY]
numpy.testing.assert_array_equal(actual, expected)
@pytest.mark.parametrize("data", (Data1DInt, ), indirect=True)
def test_data_1d_int_to_guess(data):
actual = data.to_guess()
expected = [Y_ARRAY, X_ARRAY-0.5]
numpy.testing.assert_array_equal(actual[0], expected[0])
numpy.testing.assert_array_equal(actual[1], expected[1])
@pytest.mark.parametrize("data", DATA_1D_CLASSES, indirect=True)
def test_data_1d_to_fit(data):
actual = data.to_fit()
expected = [Y_ARRAY, STATISTICAL_ERROR_ARRAY, SYSTEMATIC_ERROR_ARRAY]
numpy.testing.assert_array_equal(actual, expected)
@pytest.mark.parametrize("data", (Data1D, ), indirect=True)
def test_data_1d_to_plot(data):
actual = data.to_plot()
yerr = numpy.sqrt(SYSTEMATIC_ERROR_ARRAY ** 2 + STATISTICAL_ERROR_ARRAY ** 2)
expected = [X_ARRAY, Y_ARRAY, yerr, None, "x", "y"]
numpy.testing.assert_array_equal(actual[0], expected[0])
numpy.testing.assert_array_equal(actual[1], expected[1])
numpy.testing.assert_array_equal(actual[2], expected[2])
numpy.testing.assert_array_equal(actual[3], expected[3])
numpy.testing.assert_array_equal(actual[4], expected[4])
numpy.testing.assert_array_equal(actual[5], expected[5])
@pytest.mark.parametrize("data", (Data1D, ), indirect=True)
def test_data_1d_to_component_plot(data):
actual = data.to_component_plot()
yerr = numpy.sqrt(SYSTEMATIC_ERROR_ARRAY ** 2 + STATISTICAL_ERROR_ARRAY ** 2)
expected = [X_ARRAY, Y_ARRAY, yerr, None, "x", "y"]
numpy.testing.assert_array_equal(actual[0], expected[0])
numpy.testing.assert_array_equal(actual[1], expected[1])
numpy.testing.assert_array_equal(actual[2], expected[2])
numpy.testing.assert_array_equal(actual[3], expected[3])
numpy.testing.assert_array_equal(actual[4], expected[4])
numpy.testing.assert_array_equal(actual[5], expected[5])
@pytest.mark.parametrize("data", (Data, Data1D, Data1DInt), indirect=True)
def test_data_to_contour(data):
with pytest.raises(AttributeError):
data.to_contour()
@pytest.mark.parametrize("data", (Data, Data2D, Data2DInt), indirect=True)
def test_data_to_plot(data):
with pytest.raises(AttributeError):
data.to_plot()
@pytest.mark.parametrize("data", (Data, Data2D, Data2DInt), indirect=True)
def test_data_to_component_plot(data):
with pytest.raises(AttributeError):
data.to_component_plot()
def test_data_simul_fit(data_simul_fit):
y, stat_error, systematic_error = data_simul_fit.to_fit()
expected_y = numpy.concatenate((Y_ARRAY, MULTIPLIER * Y_ARRAY))
expected_stat_error = numpy.concatenate((STATISTICAL_ERROR_ARRAY, MULTIPLIER * STATISTICAL_ERROR_ARRAY))
expected_sys_error = numpy.concatenate((SYSTEMATIC_ERROR_ARRAY, MULTIPLIER * SYSTEMATIC_ERROR_ARRAY))
numpy.testing.assert_array_equal(y, expected_y)
numpy.testing.assert_array_equal(stat_error, expected_stat_error)
numpy.testing.assert_array_equal(systematic_error, expected_sys_error)
def test_data_simul_fit_to_plot(data_simul_fit):
actual = data_simul_fit.to_fit()
expected_y = numpy.concatenate((Y_ARRAY, MULTIPLIER * Y_ARRAY))
expected_stat_error = numpy.concatenate((STATISTICAL_ERROR_ARRAY, MULTIPLIER * STATISTICAL_ERROR_ARRAY))
expected_sys_error = numpy.concatenate((SYSTEMATIC_ERROR_ARRAY, MULTIPLIER * SYSTEMATIC_ERROR_ARRAY))
numpy.testing.assert_array_equal(actual[0], expected_y)
numpy.testing.assert_array_equal(actual[1], expected_stat_error)
numpy.testing.assert_array_equal(actual[2], expected_sys_error)
def test_data_simul_fit_no_errors(data_simul_fit_no_errors):
y, stat_error, systematic_error = data_simul_fit_no_errors.to_fit()
expected_y = numpy.concatenate((Y_ARRAY, MULTIPLIER * Y_ARRAY))
numpy.testing.assert_array_equal(y, expected_y)
assert stat_error is None
assert systematic_error is None
def test_data_simul_fit_some_errors(data_simul_fit_some_errors):
with pytest.raises(DataErr):
data_simul_fit_some_errors.to_fit()
def test_data_simul_fit_eval_model_to_fit(data_simul_fit):
model = Polynom1D()
model.c0 = 0
model.c1 = MULTIPLIER
data_simul_fit.datasets[0].mask = X_ARRAY <= X_THRESHOLD
data_simul_fit.datasets[1].mask = X_ARRAY <= X_THRESHOLD
evaluated_data = data_simul_fit.eval_model_to_fit((model, model))
expected_data = numpy.concatenate((MULTIPLIER * X_ARRAY[:X_THRESHOLD+1],
MULTIPLIER**2 * X_ARRAY[:X_THRESHOLD+1]))
numpy.testing.assert_array_equal(evaluated_data, expected_data)
@pytest.mark.parametrize("data", (Data1D,), indirect=True)
def test_data1d_get_dims(data):
assert data.get_dims() == (X_ARRAY.size, )
@pytest.mark.parametrize("data", (Data1D,), indirect=True)
def test_data1d_get_filter(data):
data.mask = X_ARRAY <= X_THRESHOLD
assert data.get_filter() == '0.0000:3.0000'
@pytest.mark.parametrize("data", (Data1D,), indirect=True)
def test_data1d_get_filter_mask(data):
data.mask = X_ARRAY <= X_THRESHOLD
assert data.get_filter() == '0.0000:3.0000'
@pytest.mark.parametrize("data", (Data1D,), indirect=True)
def test_data1d_get_filter_expr(data):
data.mask = X_ARRAY <= X_THRESHOLD
assert data.get_filter_expr() == '0.0000-3.0000 x'
@pytest.mark.parametrize("data", (Data1D,), indirect=True)
def test_data1d_get_bounding_mask(data):
mask = X_ARRAY <= X_THRESHOLD
data.mask = mask
actual = data.get_bounding_mask()
numpy.testing.assert_array_equal(actual[0], mask)
numpy.testing.assert_array_equal(actual[1], X_ARRAY.size)
@pytest.mark.parametrize("data", (Data1D,), indirect=True)
def test_data1d_get_img(data):
numpy.testing.assert_array_equal(data.get_img(), [Y_ARRAY, ])
@pytest.mark.parametrize("data", (Data1D,), indirect=True)
def test_data1d_get_img_yfunc(data):
actual = data.get_img(yfunc=lambda x: MULTIPLIER * x)
expected = ([Y_ARRAY, ], [MULTIPLIER * X_ARRAY, ], )
numpy.testing.assert_array_equal(actual, expected)
@pytest.mark.parametrize("data", (Data1D,), indirect=True)
def test_data1d_get_imgerr(data):
expected_error = numpy.sqrt(SYSTEMATIC_ERROR_ARRAY ** 2 + STATISTICAL_ERROR_ARRAY ** 2)
numpy.testing.assert_array_equal(data.get_imgerr(), [expected_error, ])
@pytest.mark.parametrize("data", (Data1D,), indirect=True)
def test_data1d_get_imgerr_when_none(data):
# Clear out the errors
data.syserror = None
data.staterror = None
assert data.get_imgerr() is None
@pytest.mark.parametrize("data", (Data1D, Data1DInt), indirect=True)
def test_data1d_get_x(data):
numpy.testing.assert_array_equal(data.get_x(), X_ARRAY)
@pytest.mark.parametrize("data", (Data1D, ), indirect=True)
def test_data1d_get_xerr(data):
assert data.get_xerr() is None
@pytest.mark.parametrize("data", (Data1DInt, ), indirect=True)
def test_data_1d_int_get_xerr(data):
numpy.testing.assert_array_equal(data.get_xerr(), [1] * X_ARRAY.size)
@pytest.mark.parametrize("data", (Data1D, Data1DInt), indirect=True)
def test_data1d_get_y(data):
numpy.testing.assert_array_equal(data.get_y(), Y_ARRAY)
@pytest.mark.parametrize("data", (Data2D, ), indirect=True)
def test_data2_get_x0(data):
numpy.testing.assert_array_equal(data.get_x0(), X0_2D)
@pytest.mark.parametrize("data", (Data2DInt, ), indirect=True)
def test_data2_int_get_x0(data):
numpy.testing.assert_array_equal(data.get_x0(), X0_2D)
@pytest.mark.parametrize("data", (Data2D, ), indirect=True)
def test_data2_get_x1(data):
numpy.testing.assert_array_equal(data.get_x1(), X1_2D)
@pytest.mark.parametrize("data", (Data2DInt, ), indirect=True)
def test_data2_int_get_x1(data):
numpy.testing.assert_array_equal(data.get_x1(), X1_2D)
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_get_dims(data):
assert data.get_dims() == (X_ARRAY.size, X_ARRAY.size)
# DATA-NOTE: Not sure this should work, really, as the 1D implementation does not account for the difference in 2D
# data, but in 2D it is hard with the current implementation to figure out the shape if self.shape is None.
@pytest.mark.xfail
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_get_dims_no_shape(data):
data.shape = None
assert data.get_dims() == (X_ARRAY.size, X_ARRAY.size)
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_get_axes(data):
numpy.testing.assert_array_equal(data.get_axes(), (X_ARRAY+1, X_ARRAY+1))
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_get_img(data):
numpy.testing.assert_array_equal(data.get_img(), Y_2D_RAW)
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_get_imgerr(data):
expected_error = numpy.sqrt(STAT_ERROR_2D ** 2 + SYS_ERROR_2D ** 2).reshape(SHAPE_2D)
numpy.testing.assert_array_equal(data.get_imgerr(), expected_error)
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_get_xerr(data):
with pytest.raises(AttributeError):
data.get_xerr()
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_get_dep_filter(data):
test_filter = X0_2D <= X_THRESHOLD
data.mask = test_filter
numpy.testing.assert_array_equal(data.get_dep(filter=True), Y_2D[test_filter])
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_get_staterror(data):
numpy.testing.assert_array_equal(data.get_staterror(), STAT_ERROR_2D)
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_get_staterror_filter(data):
test_filter = X0_2D <= X_THRESHOLD
data.mask = test_filter
numpy.testing.assert_array_equal(data.get_staterror(filter=True), STAT_ERROR_2D[test_filter])
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_get_syserror(data):
numpy.testing.assert_array_equal(data.get_syserror(), SYS_ERROR_2D)
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_get_syserror_filter(data):
test_filter = X0_2D <= X_THRESHOLD
data.mask = test_filter
numpy.testing.assert_array_equal(data.get_syserror(filter=True), SYS_ERROR_2D[test_filter])
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_get_error(data):
error = data.get_error()
expected_error = numpy.sqrt(SYS_ERROR_2D ** 2 + STAT_ERROR_2D ** 2)
numpy.testing.assert_array_equal(error, expected_error)
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_get_yerr(data):
error = data.get_yerr()
expected_error = numpy.sqrt(SYS_ERROR_2D ** 2 + STAT_ERROR_2D ** 2)
numpy.testing.assert_array_equal(error, expected_error)
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_get_dep(data):
numpy.testing.assert_array_equal(data.get_dep(), Y_2D)
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_get_y(data):
numpy.testing.assert_array_equal(data.get_y(), Y_2D)
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_get_y_filter(data):
test_filter = X0_2D <= X_THRESHOLD
data.mask = test_filter
numpy.testing.assert_array_equal(data.get_y(filter=True), Y_2D[test_filter])
@pytest.mark.parametrize("data", (Data2D, ), indirect=True)
def test_data2_get_y_filter_func(data):
test_filter = X0_2D <= X_THRESHOLD
data.mask = test_filter
y = data.get_y(filter=True, yfunc=lambda x0, x1: MULTIPLIER*(x0 + x1))
expected_y = Y_2D[test_filter], (MULTIPLIER * (X0_2D + X1_2D))[test_filter]
numpy.testing.assert_array_equal(y[0], expected_y[0])
numpy.testing.assert_array_equal(y[1], expected_y[1])
@pytest.mark.parametrize("data", (Data2D, ), indirect=True)
def test_data2_get_img_func(data):
y = data.get_img(yfunc=lambda x0, x1: MULTIPLIER*(x0 + x1))
expected_y = Y_2D_RAW, MULTIPLIER * (X0_2D + X1_2D).reshape(data.shape)
numpy.testing.assert_array_equal(y[0], expected_y[0])
numpy.testing.assert_array_equal(y[1], expected_y[1])
@pytest.mark.parametrize("data", (Data2DInt, ), indirect=True)
def test_data2_int_get_y_filter_func(data):
test_filter = X0_2D <= X_THRESHOLD
data.mask = test_filter
y = data.get_y(filter=True, yfunc=lambda x0lo, x0hi, x1lo, x1hi: MULTIPLIER*((x0lo+x0hi)/2 + (x1lo+x1hi)/2))
expected_y = Y_2D[test_filter], (MULTIPLIER * (X0_2D + X1_2D))[test_filter]
numpy.testing.assert_array_equal(y[0], expected_y[0])
numpy.testing.assert_array_equal(y[1], expected_y[1])
@pytest.mark.parametrize("data", (Data2DInt, ), indirect=True)
def test_data2_int_get_img_func(data):
y = data.get_img(yfunc=lambda x0lo, x0hi, x1lo, x1hi: MULTIPLIER*((x0lo+x0hi)/2 + (x1lo+x1hi)/2))
expected_y = Y_2D_RAW, MULTIPLIER * (X0_2D + X1_2D).reshape(data.shape)
numpy.testing.assert_array_equal(y[0], expected_y[0])
numpy.testing.assert_array_equal(y[1], expected_y[1])
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_eval_model(data):
model = Polynom2D()
model.c = 0
model.cy1 = MULTIPLIER
model.cx1 = MULTIPLIER
evaluated_data = data.eval_model(model)
numpy.testing.assert_array_equal(evaluated_data, MULTIPLIER * (X0_2D + X1_2D))
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_eval_model_to_fit_no_filter(data):
model = Polynom2D()
model.c = 0
model.cy1 = MULTIPLIER
model.cx1 = MULTIPLIER
evaluated_data = data.eval_model_to_fit(model)
numpy.testing.assert_array_equal(evaluated_data, MULTIPLIER * (X0_2D + X1_2D))
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_eval_model_to_fit_filter(data):
model = Polynom2D()
model.c = 0
model.cy1 = MULTIPLIER
model.cx1 = MULTIPLIER
test_filter = X0_2D <= X_THRESHOLD
data.mask = test_filter
evaluated_data = data.eval_model_to_fit(model)
numpy.testing.assert_array_equal(evaluated_data, (MULTIPLIER * (X0_2D + X1_2D))[test_filter])
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_get_max_pos(data):
numpy.testing.assert_array_equal(data.get_max_pos(), (X_ARRAY.size-1, X_ARRAY.size-1))
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data2_get_max_pos_dep(data):
dep = 1/(Y_2D+1) # +1 to avoid dividing by zero
numpy.testing.assert_array_equal(data.get_max_pos(dep=dep), (0, 0))
# DATA-NOTE: This is failing because Data2D.notice isn't implemented correctly and it just combines the
# masks on the two axes into one, i.e. mask_x0 && mask_x1 is applied to both axes.
# We probably never noticed because DataIMG defines a notice2d method which we always use.
@pytest.mark.xfail
@pytest.mark.parametrize("data", (Data2D, ), indirect=True)
def test_data2_get_indep_notice(data):
test_filter_0 = X0_2D <= X_THRESHOLD
test_filter_1 = X1_2D <= X_THRESHOLD + 1
data.notice(0, X_THRESHOLD, 0, X_THRESHOLD + 1)
expected = [X0_2D[test_filter_0], X1_2D[test_filter_1]]
actual = data.get_indep(filter=True)
numpy.testing.assert_array_equal(actual[0], expected[0])
numpy.testing.assert_array_equal(actual[1], expected[1])
# DATA-NOTE: This is failing for a different reason (can't get_indep(filter=True) in the first place).
# Not sure whether I am doing something wrong, but it's unlikely, since the Data2DInt.notice()
# signature seems consistent with what I am doing. In any case the problem is that at some point the
# Data2DInt.mask is a (10, 10) array, while the shape of the data is (100, )
@pytest.mark.xfail
@pytest.mark.parametrize("data", (Data2DInt, ), indirect=True)
def test_data2_int_get_indep_notice(data):
test_filter_0 = X0_2D <= X_THRESHOLD
test_filter_1 = X1_2D <= X_THRESHOLD + 1
data.notice(0, X_THRESHOLD, 0, X_THRESHOLD + 1)
expected = [(X0_2D - 0.5)[test_filter_0],
(X0_2D + 0.5)[test_filter_0],
(X1_2D - 0.5)[test_filter_1],
(X1_2D + 0.5)[test_filter_1]]
actual = data.get_indep(filter=True)
numpy.testing.assert_array_equal(actual[0], expected[0])
numpy.testing.assert_array_equal(actual[1], expected[1])
# DATA-NOTE: This is just a notice call in disguise, so it fails just like the case above.
@pytest.mark.xfail
@pytest.mark.parametrize("data", (Data2D, ), indirect=True)
def test_data2_get_indep_ignore(data):
test_filter_0 = X0_2D > X_THRESHOLD
test_filter_1 = X1_2D > X_THRESHOLD + 1
data.ignore(0, X_THRESHOLD, 0, X_THRESHOLD + 1)
expected = [X0_2D[test_filter_0], X1_2D[test_filter_1]]
actual = data.get_indep(filter=True)
numpy.testing.assert_array_equal(actual[0], expected[0])
numpy.testing.assert_array_equal(actual[1], expected[1])
@pytest.mark.parametrize("data", (Data2D, ), indirect=True)
def test_data2_get_indep_mask(data):
test_filter = X0_2D == 0
data.mask = test_filter
expected = (X0_2D[test_filter], X1_2D[test_filter])
numpy.testing.assert_array_equal(data.get_indep(filter=True), expected)
# DATA-NOTE: this fails because get_indep() does not work. Either I am missing something fundamental
# or the Data2DInt methods are bogus
@pytest.mark.xfail
@pytest.mark.parametrize("data", (Data2DInt, ), indirect=True)
def test_data2_int_get_indep_mask(data):
test_filter = X0_2D == 0
data.mask = test_filter
expected = (X0_2D[test_filter], X1_2D[test_filter])
numpy.testing.assert_array_equal(data.get_indep(filter=True), expected)
@pytest.fixture
def array_sizes_fixture():
x0low, x0high = 3000, 4000
x1low, x1high = 4000, 4800
dx = 500
x1, x0 = numpy.mgrid[x1low:x1high:dx, x0low:x0high:dx]
y = (x0 - 3500) ** 2 + (x1 - 4500) ** 2
return x0, x1, dx, y
# https://github.com/sherpa/sherpa/issues/627
def test_data2d_wrong_array_size(array_sizes_fixture):
x0, x1, dx, y = array_sizes_fixture
with pytest.raises(DataErr,
match="Array must be 1D"):
Data2D('name', x0, x1, y.flatten(), staterror=numpy.sqrt(y).flatten())
def test_data2d_wrong_y_array_size(array_sizes_fixture):
x0, x1, dx, y = array_sizes_fixture
with pytest.raises(DataErr,
match="Array must be 1D"):
Data2D('name', x0.flatten(), x1.flatten(), y, staterror=numpy.sqrt(y).flatten())
def test_data2d_int_wrong_array_size(array_sizes_fixture):
x0, x1, dx, y = array_sizes_fixture
with pytest.raises(DataErr,
match="Array must be 1D"):
Data2DInt('name', x0, x0, x1, x1, y.flatten(), staterror=numpy.sqrt(y).flatten())
def test_data2d_int_wrong_y_array_size(array_sizes_fixture):
x0, x1, dx, y = array_sizes_fixture
with pytest.raises(DataErr,
match="Array must be 1D"):
Data2DInt('name', x0.flatten(), x0.flatten(), x1.flatten(), x1.flatten(), y, staterror=numpy.sqrt(y).flatten())
# https://github.com/sherpa/sherpa/issues/628
def test_data2d_int_eval_model_to_fit(array_sizes_fixture):
from sherpa.fit import Fit
from sherpa.optmethods import LevMar
from sherpa.stats import Chi2
from sherpa.models import Gauss2D
x0, x1, dx, y = array_sizes_fixture
data2 = Data2DInt('name', x0.flatten(), x1.flatten(),
x0.flatten() + dx, x1.flatten() + dx,
y.flatten(),
staterror=numpy.sqrt(y).flatten())
model2 = Gauss2D()
fitter = Fit(data2, model2, Chi2(), LevMar())
fitter.fit() # Failed in Sherpa 4.11.0
# https://github.com/sherpa/sherpa/issues/695
@pytest.mark.parametrize('arrpos', [POS_X_ARRAY])
@pytest.mark.parametrize("Dataclass", ALL_DATA_CLASSES)
def test_data_indep_masked_numpyarray(arrpos, Dataclass):
i = arrpos[Dataclass]
args = list(INSTANCE_ARGS[Dataclass])
mask = numpy.random.rand(*(args[i].shape)) > 0.5
args[i] = numpy.ma.array(args[i], mask=mask)
with pytest.warns(UserWarning, match="for dependent variables only"):
data = Dataclass(*args)
assert len(data.get_dep(filter=True)) == len(args[POS_Y_ARRAY[Dataclass]])
@pytest.mark.parametrize('arrpos', [POS_STATERR_ARRAY, POS_SYSERR_ARRAY])
@pytest.mark.parametrize("Dataclass", ALL_DATA_CLASSES)
def test_data_err_masked_numpyarray(arrpos, Dataclass):
i = arrpos[Dataclass]
args = list(INSTANCE_ARGS[Dataclass])
mask = numpy.random.rand(*(args[i].shape)) > 0.5
args[i] = numpy.ma.array(args[i], mask=mask)
with pytest.warns(UserWarning, match=" differs from the dependent array, "):
data = Dataclass(*args)
assert len(data.get_dep(filter=True)) == len(args[POS_Y_ARRAY[Dataclass]])
@pytest.mark.parametrize('arrpos', [POS_STATERR_ARRAY, POS_SYSERR_ARRAY])
@pytest.mark.parametrize("Dataclass", ALL_DATA_CLASSES)
def test_data_deperr_masked_numpyarray(arrpos, Dataclass):
'''Error arrays can be masked as long as that mask is the same as the dependent array'''
i = arrpos[Dataclass]
j = POS_Y_ARRAY[Dataclass]
args = list(INSTANCE_ARGS[Dataclass])
mask = numpy.random.rand(*(args[i].shape)) > 0.5
args[i] = numpy.ma.array(args[i], mask=mask)
args[j] = numpy.ma.array(args[j], mask=mask)
data = Dataclass(*args)
assert len(data.get_dep(filter=True)) == (~mask).sum()
@pytest.mark.parametrize("Dataclass", REALLY_ALL_DATA_CLASSES)
def test_data_dep_masked_numpyarray(Dataclass):
args = list(INSTANCE_ARGS[Dataclass])
posy = POS_Y_ARRAY[Dataclass]
mask = numpy.random.rand(*(args[posy].shape)) > 0.5
args[posy] = numpy.ma.array(args[posy], mask=mask)
data = Dataclass(*args)
assert data.mask.shape == mask.shape
assert numpy.all(data.mask == ~mask)
assert len(data.get_dep(filter=True)) == (~mask).sum()
@pytest.mark.parametrize("Dataclass", REALLY_ALL_DATA_CLASSES)
def test_data_dep_masked_numpyarray_nomask(Dataclass):
args = list(INSTANCE_ARGS[Dataclass])
posy = POS_Y_ARRAY[Dataclass]
# By default, numpy creates a masked array with no mask set
args[posy] = numpy.ma.array(args[posy])
data = Dataclass(*args)
# Sherpa's way of saying "mask is not set"
assert data.mask is True
assert len(data.get_dep(filter=True)) == len(args[posy].flatten())
@pytest.mark.parametrize("Dataclass", ALL_DATA_CLASSES)
def test_data_indep_anyobj_with_mask(Dataclass):
args = list(INSTANCE_ARGS[Dataclass])
class DummyMask(list):
mask = 'whatisthis'
args[1] = DummyMask(args[1])
with pytest.warns(UserWarning, match="for dependent variables only"):
data = Dataclass(*args)
assert data.mask is True
assert len(data.get_dep(filter=True)) == len(args[POS_Y_ARRAY[Dataclass]])
@pytest.mark.parametrize("Dataclass", REALLY_ALL_DATA_CLASSES)
def test_data_dep_any_obj_with_mask(Dataclass):
args = list(INSTANCE_ARGS[Dataclass])
posy = POS_Y_ARRAY[Dataclass]
class DummyMask(list):
mask = 'whatisthis'
args[posy] = DummyMask(args[posy])
with pytest.warns(UserWarning, match="Set .mask"):
data = Dataclass(*args)
assert data.mask is True
assert len(data.get_dep(filter=True)) == len(data.get_dep(filter=False))
# Repeat the set of tests, except now using the ui layer.
# Results should be identical, but the tests are fast, so we just test again
# to make sure that there is no heuristic in load_arrays or similar that
# interferes with the logic
@pytest.mark.parametrize('arrpos', [POS_X_ARRAY, POS_STATERR_ARRAY, POS_SYSERR_ARRAY])
@pytest.mark.parametrize('Session', [Session, AstroSession])
@pytest.mark.parametrize("data_for_load_arrays", ALL_DATA_CLASSES, indirect=True)
def test_data_indeperr_masked_numpyarray_ui(arrpos, data_for_load_arrays, Session):
session, args, data = data_for_load_arrays
session = Session()
i = arrpos[type(data)]
mask = numpy.random.rand(*(args[i].shape)) > 0.5
args = list(args)
args[1] = numpy.ma.array(args[i], mask=mask)
with pytest.warns(UserWarning, match="for dependent variables only"):
session.load_arrays(*args)
new_data = session.get_data(data.name)
assert len(new_data.get_dep(filter=True)) == len(args[i])
@pytest.mark.parametrize('Session', [Session, AstroSession])
@pytest.mark.parametrize("data_for_load_arrays", ALL_DATA_CLASSES, indirect=True)
def test_data_dep_masked_numpyarray_ui(data_for_load_arrays, Session):
session, args, data = data_for_load_arrays
session = Session()
posy = POS_Y_ARRAY[type(data)]
mask = numpy.random.rand(*(args[posy].shape)) > 0.5
args = list(args)
args[posy] = numpy.ma.array(args[posy], mask=mask)
session.load_arrays(*args)
new_data = session.get_data(data.name)
assert new_data.mask.shape == mask.shape
assert numpy.all(new_data.mask == ~mask)
assert len(new_data.get_dep(filter=True)) == (~mask).sum()
@pytest.mark.parametrize('Session', [Session, AstroSession])
@pytest.mark.parametrize("data_for_load_arrays", ALL_DATA_CLASSES, indirect=True)
def test_data_dep_masked_numpyarray_nomask_ui(data_for_load_arrays, Session):
session, args, data = data_for_load_arrays
session = Session()
posy = POS_Y_ARRAY[type(data)]
args = list(args)
args[posy] = numpy.ma.array(args[posy])
session.load_arrays(*args)
new_data = session.get_data(data.name)
# Sherpa's way of saying "mask is not set"
assert new_data.mask is True
assert len(new_data.get_dep(filter=True)) == len(args[posy].flatten())
# https://github.com/sherpa/sherpa/issues/346
@pytest.mark.parametrize('Session', [Session, AstroSession])
def test_regression_346(Session):
session = Session()
x = numpy.arange(-5, 5.1)
old_y = x*x + 23.2
y = numpy.ma.masked_array(old_y, mask=old_y < 35)
e = numpy.ones(x.size)
session.load_arrays("mydata", x, y, e)
    filtered_y = session.get_dep("mydata", filter=True)
assert numpy.allclose(filtered_y, [48.2, 39.2, 39.2, 48.2])
def test_manual_setting_mask():
d = Data1D(name='test', x=[1, 2, 3], y=[0, 0, 0])
d.mask = True
assert len(d.get_dep(filter=True)) == 3
d.mask = False
    # This test looks like it does not do anything, but in fact "mask"
    # is a property with complex logic, so the fact that setting it to
    # False makes it False is non-trivial.
    # I don't want to test for
    # len(d.get_dep(filter=True)) == 0
    # because get_dep raises an error when no data is noticed
    # and I don't want to test get_dep here, but rather the fact that
    # setting the mask itself works.
assert d.mask is False
d.mask = [True, False, True]
assert len(d.get_dep(filter=True)) == 2
arr = numpy.ma.array([3, 4, 5])
# aka numpy.ma.nomask, but used in a more natural way
d.mask = arr.mask
assert len(d.get_dep(filter=True)) == 3
with pytest.raises(DataErr,
match="True, False, or a mask array"):
d.mask = None
def test_data_filter_no_data():
"""Check we get a excludes-all-data error"""
x = numpy.asarray([1, 2, 5])
d = Data1D('x', x, x)
assert d.mask
d.ignore()
assert d.mask is False
with pytest.raises(DataErr,
match="mask excludes all data"):
d.apply_filter([1, 2, 3])
def test_data_filter_invalid_size_scalar():
"""Check we get a size-mismatch error when sent a scalar"""
x = numpy.asarray([1, 2, 5])
d = Data1D('x', x, x)
d.ignore(None, 2)
assert d.mask == pytest.approx([False, False, True])
with pytest.raises(DataErr,
match="Array must be a sequence or None"):
d.apply_filter(4)
@pytest.mark.parametrize("vals", [[4], [2, 3, 4, 5]])
def test_data_filter_invalid_size_sequence(vals):
"""Check we get a size-mismatch error: sequence sent a 1D array"""
x = numpy.asarray([1, 2, 5])
d = Data1D('x', x, x)
d.ignore(None, 2)
with pytest.raises(DataErr,
match=f"size mismatch between data and array: 3 vs {len(vals)}"):
d.apply_filter(vals)
@pytest.mark.parametrize("vals", [[[2, 3, 4]], [[2, 3], [3, 2]]])
def test_data_filter_invalid_size_sequence_nd(vals):
"""Check we get a size-mismatch error: sequence sent a nD array"""
x = numpy.asarray([1, 2, 5])
d = Data1D('x', x, x)
d.ignore(None, 2)
with pytest.raises(DataErr,
match="Array must be 1D"):
d.apply_filter(vals)
@pytest.mark.parametrize("data", ALL_DATA_CLASSES, indirect=True)
def test_data_apply_filter_invalid_size(data):
"""There's no filter applied but the argument is the wrong size.
Test related to issue #1439 which is an issue with the DataPHA class.
"""
with pytest.raises(DataErr,
match=r"^size mismatch between data and array: (100?) vs 2$"):
data.apply_filter([1, 2])
@pytest.mark.parametrize("data_class,args", EMPTY_DATA_OBJECTS)
def test_data_apply_filter_empty(data_class, args):
"""There's no data so how can we filter?
We could error out or just return the supplied argument, so
this is a regression test
"""
data = data_class("empty", *args)
orig = [1, 2]
with pytest.raises(DataErr,
match="The size of 'empty' has not been set"):
data.apply_filter(orig)
@pytest.mark.parametrize("lo,hi,emsg", [("1:20", None, 'lower'), (None, "2", 'upper'), ("0.5", "7", 'lower')])
@pytest.mark.parametrize("ignore", [False, True])
def test_data1d_notice_errors_out_on_string_range(lo, hi, emsg, ignore):
"""Check we get an error if lo or hi are strings."""
xlo = numpy.asarray([1, 2, 5])
xhi = numpy.asarray([2, 3, 8])
y = numpy.zeros(3)
d = Data1D('tmp', xlo, xhi, y)
with pytest.raises(DataErr,
match=f"strings not allowed in {emsg} bound list"):
d.notice(lo, hi, ignore=ignore)
@pytest.mark.parametrize("expected,args",
[('2.0:20.0', []),
('', [(False, 1, 30)]),
('10.0:17.0', [(True, 7.1, 18)]),
('10.0:12.0,17.0', [(True, 7.1, 18), (False, 13, 16)]),
('2.0:12.0,17.0', [(True, 7.1, 18), (False, 13, 16), (True, 0, 12)]),
('10.0:12.0,17.0:20.0', [(True, 7.1, 18), (False, 13, 16), (True, 15.5, 30)]),
('', [(True, 7.1, 18), (False, 13, 16), (True, 6, 17), (False, 1, 40)]),
('2.0:20.0', [(True, 7.1, 18), (False, 13, 16), (True, 6, 17), (True, 1, 40)]),
])
def test_data1d_get_filter_calls(expected, args):
"""Basic check of get_filter
expected is the expected response
args is a list of 3-tuples of (flag, loval, hival) where
flag is True for notice and False for ignore; they define
the filter to apply
"""
xs = numpy.asarray([2, 5, 10, 12, 15, 17, 20])
ys = numpy.ones(xs.size)
d = Data1D('data', xs, ys)
for (flag, lo, hi) in args:
if flag:
d.notice(lo, hi)
else:
d.ignore(lo, hi)
assert d.get_filter(format='%.1f') == expected
@pytest.mark.parametrize("expected,args",
[('2.0:25.0', []),
('', [(False, 1, 30)]),
('5.0:20.0', [(True, 7.1, 18)]),
('5.0:12.0,17.0:20.0', [(True, 7.1, 18), (False, 13, 16)]),
('2.0:12.0,17.0:20.0', [(True, 7.1, 18), (False, 13, 16), (True, 0, 12)]),
('5.0:12.0,15.0:25.0', [(True, 7.1, 18), (False, 13, 16), (True, 15.5, 30)]),
('', [(True, 7.1, 18), (False, 13, 16), (True, 6, 17), (False, 1, 40)]),
('2.0:25.0', [(True, 7.1, 18), (False, 13, 16), (True, 6, 17), (True, 1, 40)]),
])
def test_data1dint_get_filter_calls(expected, args):
"""Basic check of get_filter
expected is the expected response
args is a list of 3-tuples of (flag, loval, hival) where
flag is True for notice and False for ignore; they define
the filter to apply
"""
# Note this is not a contiguous grid
xlos = numpy.asarray([2, 5, 10, 12, 15, 17, 20])
xhis = numpy.asarray([5, 8, 12, 15, 16, 20, 25])
ys = numpy.ones(xlos.size)
d = Data1DInt('data', xlos, xhis, ys)
for (flag, lo, hi) in args:
if flag:
d.notice(lo, hi)
else:
d.ignore(lo, hi)
assert d.get_filter(format='%.1f') == expected
def test_data1dint_get_x_xerr():
"""Check get_x/get_xerr when filtering
This was added because there was a bug when all data had been
filtered out. It is essentially the same as
test_data1dint_get_filter_calls since get_filter calls get_x,
but it does add explicit checks and a check of get_xerr.
"""
# Note this is not a contiguous grid
xlos = numpy.asarray([2, 5, 10, 12, 15, 17, 20])
xhis = numpy.asarray([5, 8, 12, 15, 16, 20, 25])
ys = numpy.ones(xlos.size)
d = Data1DInt('data', xlos, xhis, ys)
x = [3.5, 6.5, 11, 13.5, 15.5, 18.5, 22.5]
xerr = xhis - xlos
assert d.get_x() == pytest.approx(x)
assert d.get_xerr() == pytest.approx(xerr)
assert d.get_x(True) == pytest.approx(x)
assert d.get_xerr(True) == pytest.approx(xerr)
# Ignore a few points at the start and end
d.notice(11, 18)
# Just check that the default behavior doesn't change with the filter
assert d.get_x() == pytest.approx(x)
assert d.get_xerr() == pytest.approx(xerr)
assert d.get_x(True) == pytest.approx(x[2:-1])
assert d.get_xerr(True) == pytest.approx(xerr[2:-1])
# Now ignore all points
d.ignore(0, 1000)
assert d.get_x() == pytest.approx(x)
assert d.get_xerr() == pytest.approx(xerr)
assert d.get_x(True) == pytest.approx([])
assert d.get_xerr(True) == pytest.approx([])
@pytest.mark.parametrize('ignore', [False, True])
@pytest.mark.parametrize('lo,hi,evals',
[(0.5, 2.3, (0, 10, 0)),
(0.7, 2.1, (1, 8, 1)),
(0.5, 0.7, (0, 2, 8)),
(1.1, 1.3, (3, 2, 5)),
(2.1, 2.3, (8, 2, 0)),
# special case filters that are within a single bin
(0.45, 0.55, (0, 1, 9)),
(0.65, 0.75, (1, 1, 8)),
(1.05, 1.15, (3, 1, 6)),
(2.25, 2.35, (9, 1, 0)),
# outside the limits
(0.1, 0.4, (0, 0, 10)),
(0.1, 0.5, (0, 1, 9)),
(2.41, 2.8, (10, 0, 0)),
# Now queries on the edge of each bin; these would ideally
# only match 1 bin
(0.4, 0.6, (0, 1, 9)),
(0.6, 0.8, (1, 1, 8)),
(0.8, 1.0, (2, 1, 7)),
(1.0, 1.2, (3, 1, 6)),
(1.2, 1.4, (4, 1, 5)),
(1.4, 1.6, (5, 1, 4)),
(1.6, 1.8, (6, 1, 3)),
(1.8, 2.0, (7, 1, 2)),
(2.0, 2.2, (8, 1, 1)),
(2.2, 2.4, (9, 1, 0)),
# check last upper limit
(2.4, 2.6, (10, 0, 0))
])
def test_data1dint_check_limit(ignore, lo, hi, evals):
"""Does Data1DInt handle limits (in particular upper limits).
This is based on sherpa/astro/tests/test_astro_data.py::test_pha_check_limit
but without the need for an ARF. It selects different bins than the
PHA case!
"""
egrids = 0.2 + 0.2 * numpy.arange(1, 12)
d = Data1DInt('example', egrids[:-1], egrids[1:],
numpy.ones(10))
assert d.mask is True
func = d.ignore if ignore else d.notice
func(lo, hi)
if ignore:
vout = True
vin = False
else:
vout = False
vin = True
c1, c2, c3 = evals
expected = [vout] * c1 + [vin] * c2 + [vout] * c3
assert d.mask == pytest.approx(expected)
def test_filter_apply_none():
"""What happens here?
This is just to ensure a code path is tested. We might want to
understand if we can ever call apply with None in a "normal" use
case.
"""
assert Filter().apply(None) is None
def test_data_mask_when_no_elements():
"""what happens when there's no data?"""
data = Data1D("x", None, None)
assert data.mask is True
with pytest.raises(DataErr,
match="The independent axis has not been set yet"):
data.mask = [1, 2]
def test_data1d_get_y_checks_model_dim():
"""Check an error message"""
data = Data1D("x", None, None)
mdl = Polynom2D()
with pytest.raises(DataErr,
match="Data and model dimensionality do not match: 1D and 2D"):
data.get_y(yfunc=mdl)
def test_ispace2d_mismatch():
"""There is currently no check for this mis-match.
This is a regression test.
"""
x0 = numpy.arange(10)
x1 = numpy.arange(11)
with pytest.raises(DataErr,
match="size mismatch between x0 and x1: 9 vs 10"):
IntegratedDataSpace2D(Filter(), x0[:-1], x1[:-1], x0[1:], x1[1:])
@pytest.fixture
def make_data2dint():
"""Create a simple 2D Int data set."""
# A 1 by 2 grid, which we make sure does not start at 1,1 to check
# that this is handled correctly.
#
x1, x0 = numpy.mgrid[10:12, -5:-4]
shape = x0.shape
x0 = x0.flatten()
x1 = x1.flatten()
y = numpy.asarray([10, 5])
return Data2DInt("ival", x0, x1, x0 + 1, x1 + 1,
y, shape=shape)
def test_data2dint_create(make_data2dint):
"""Check we can create a basic integrated 2D data set.
See issue #1379
"""
x0 = numpy.asarray([-5, -5])
x1 = numpy.asarray([10, 11])
img = make_data2dint
assert (img.dep == [10, 5]).all()
assert len(img.indep) == 4
assert (img.indep[0] == x0).all()
assert (img.indep[1] == x1).all()
assert (img.indep[2] == (x0 + 1)).all()
assert (img.indep[3] == (x1 + 1)).all()
# I was initially surprised there was no header, so just make sure
# we check for it not being here.
#
assert not(hasattr(img, "header"))
def test_data2dint_show(make_data2dint):
"""Check we can show a basic integrated 2D data set.
See issue #1379
"""
img = make_data2dint
# This fails because there's problems getting x0 and x0lo
# attributes.
#
out = str(img).split("\n")
assert out[0] == "name = ival"
assert out[1] == "x0lo = Int64[2]"
assert out[2] == "x1lo = Int64[2]"
assert out[3] == "x0hi = Int64[2]"
assert out[4] == "x1hi = Int64[2]"
assert out[5] == "y = Int64[2]"
assert out[6] == "staterror = None"
assert out[7] == "syserror = None"
assert out[8] == "shape = (2, 1)"
assert len(out) == 9
def test_data2dint_get_x0(make_data2dint):
x0 = numpy.asarray([-5, -5])
x = (x0 + x0 + 1) / 2
assert (make_data2dint.get_x0() == x).all()
def test_data2dint_x0(make_data2dint):
x0 = numpy.asarray([-5, -5])
x = (x0 + x0 + 1) / 2
assert (make_data2dint.x0 == x).all()
def test_data2dint_get_x1(make_data2dint):
x1 = numpy.asarray([10, 11])
x = (x1 + x1 + 1) / 2
assert (make_data2dint.get_x1() == x).all()
def test_data2dint_x1(make_data2dint):
x1 = numpy.asarray([10, 11])
x = (x1 + x1 + 1) / 2
assert (make_data2dint.x1 == x).all()
def test_data2dint_x0lo(make_data2dint):
assert (make_data2dint.x0lo == [-5, -5]).all()
def test_data2dint_x0hi(make_data2dint):
assert (make_data2dint.x0hi == [-4, -4]).all()
def test_data2dint_x1lo(make_data2dint):
assert (make_data2dint.x1lo == [10, 11]).all()
def test_data2dint_x1hi(make_data2dint):
assert (make_data2dint.x1hi == [11, 12]).all()
def test_data2dint_get_y(make_data2dint):
assert (make_data2dint.get_y() == [10, 5]).all()
def test_data2dint_y(make_data2dint):
assert (make_data2dint.y == [10, 5]).all()
def test_data2dint_get_dep(make_data2dint):
assert (make_data2dint.get_dep() == [10, 5]).all()
def test_data2dint_get_x0label(make_data2dint):
assert make_data2dint.get_x0label() == "x0"
def test_data2dint_get_x1label(make_data2dint):
assert make_data2dint.get_x1label() == "x1"
def test_data2dint_get_ylabel(make_data2dint):
assert make_data2dint.get_ylabel() == "y"
def test_data2dint_get_axes(make_data2dint):
axes = make_data2dint.get_axes()
assert len(axes) == 2
assert (axes[0] == [1]).all()
assert (axes[1] == [1, 2]).all()
def test_data2dint_notice(make_data2dint):
"""basic notice call"""
img = make_data2dint
# The mask attribute can be True, False, or an ndarray. Fortunately
# using an ndarray as a truthy value throws a ValueError.
#
assert img.mask
# Data is defined on x0=-5, x1=10,11
# so this excludes the second point.
#
img.notice(x1lo=10, x1hi=11)
assert (img.mask == numpy.asarray([True, False])).all()
def test_data2dint_ignore(make_data2dint):
"""basic ignore call"""
img = make_data2dint
assert img.mask
img.notice(x1lo=10, x1hi=11, ignore=True)
assert (img.mask == numpy.asarray([False, True])).all()
def test_data2dint_ignore_get_filter(make_data2dint):
"""What exactly does get_filter return here?
The current behavior does not look sensible.
"""
img = make_data2dint
assert img.mask
img.notice(x1lo=10, x1hi=11, ignore=True)
assert img.get_filter() == ''
def test_data2dint_ignore_get_filter_expr(make_data2dint):
"""What exactly does get_filter_expr return here?
The current behavior does not look sensible.
"""
img = make_data2dint
assert img.mask
img.notice(x1lo=10, x1hi=11, ignore=True)
assert img.get_filter_expr() == ''
def test_data2dint_notice_get_x0(make_data2dint):
"""basic notice call + get_x0"""
img = make_data2dint
img.notice(x1lo=10, x1hi=11)
assert (img.get_x0() == numpy.asarray([-4.5, -4.5])).all()
assert (img.get_x0(True) == numpy.asarray([-4.5])).all()
def test_data2dint_notice_get_x1(make_data2dint):
"""basic notice call + get_x1"""
img = make_data2dint
img.notice(x1lo=10, x1hi=11)
assert (img.get_x1() == numpy.asarray([10.5, 11.5])).all()
assert (img.get_x1(True) == numpy.asarray([10.5])).all()
def test_data2dint_notice_get_y(make_data2dint):
"""basic notice call + get_y"""
img = make_data2dint
img.notice(x1lo=10, x1hi=11)
assert (img.get_y() == numpy.asarray([10, 5])).all()
assert (img.get_y(True) == numpy.asarray([10])).all()
def test_data2dint_get_dims(make_data2dint):
assert make_data2dint.get_dims() == (1, 2)
def test_data2dint_get_img(make_data2dint):
ival = make_data2dint.get_img()
assert ival.shape == (2, 1)
assert (ival == numpy.asarray([[10], [5]])).all()
def test_data2dint_get_img_model(make_data2dint):
"""Check we can evaluate a model AND we ignore a filter"""
img = make_data2dint
# This model evaluates
# mdl.c + mdl.cx1 * x0 + mdl.cy1 * x1
#
# which becomes, because we use the middle of the bin
#
# 10 + 1 * (-4.5) + 10 * (10.5, 11.5)
# = (110.5, 120.5)
#
mdl = Polynom2D()
mdl.c = 10
mdl.cy1 = 10
mdl.cx1 = 1
# This selects only one point.
#
img.notice(x1lo=10, x1hi=11)
# This should ignore the filter.
#
ivals = img.get_img(mdl)
assert len(ivals) == 2
assert ivals[0].shape == (2, 1)
assert ivals[1].shape == (2, 1)
assert (ivals[0] == numpy.asarray([[10], [5]])).all()
assert (ivals[1] == numpy.asarray([[110.5], [120.5]])).all()
def test_data2dint_get_max_pos(make_data2dint):
assert make_data2dint.get_max_pos() == (-4.5, 10.5)
def test_data2dint_get_bounding_mask(make_data2dint):
"""Data2D/Data2DInt do not have get_bounding_mask"""
assert not hasattr(make_data2dint, "get_bounding_mask")
@pytest.mark.parametrize("method",
["get_error",
"get_imgerr",
"get_staterror",
"get_syserror",
"get_yerr"
])
def test_data2dint_method_is_none(method, make_data2dint):
"""Check those methods that return None"""
func = getattr(make_data2dint, method)
assert func() is None
@pytest.mark.parametrize("attribute",
["staterror",
"syserror"
])
def test_data2dint_attribute_is_none(attribute, make_data2dint):
"""Check those attributes that return None"""
attr = getattr(make_data2dint, attribute)
assert attr is None
# We do not include the base Data in this list because the
# notice/ignore call does not match the 1D cases.
#
@pytest.mark.parametrize("data", DATA_1D_CLASSES, indirect=True)
def test_is_mask_reset(data, caplog):
"""What happens to the mask attribute after the independent axis is changed?"""
# Pick a value somewhere within the independent axis
assert data.mask is True
data.ignore(None, 4)
assert isinstance(data.mask, numpy.ndarray)
omask = data.mask.copy()
# Change the independent axis, but to something of the same
# length.
assert len(caplog.records) == 0
with caplog.at_level(logging.INFO, logger='sherpa'):
indep = [x + 100 for x in data.indep]
data.indep = tuple(indep)
assert len(caplog.records) == 0
# The mask has *not* been cleared
assert data.mask == pytest.approx(omask)
@pytest.mark.parametrize("data", (Data, ) + ALL_DATA_CLASSES, indirect=True)
def test_dependent_field_can_not_be_a_scalar(data):
"""This is to contrast with test_related_fields_can_not_be_a_scalar.
This is tested elsewhere but leave here to point out that the related
fields are not all handled the same.
"""
with pytest.raises(DataErr,
match="Array must be a sequence or None"):
data.y = 2
@pytest.mark.parametrize("data", (Data, ) + ALL_DATA_CLASSES, indirect=True)
@pytest.mark.parametrize("related", ["syserror", "staterror"])
def test_related_field_can_not_be_a_scalar(related, data):
"""The related fields (staterror/syserror) can not be a scalar."""
with pytest.raises(DataErr,
match="Array must be a sequence or None"):
setattr(data, related, 2)
@pytest.mark.parametrize("data", ALL_DATA_CLASSES, indirect=True)
def test_mask_sent_scalar_nomask(data):
"""What happens if the mask is sent a scalar ma.nomask?"""
assert data.mask is True
# Just check we change the field
data.mask = False
assert data.mask is False
# Check that nomask is treated as "notice everything"
data.mask = numpy.ma.nomask
assert data.mask is True
@pytest.mark.parametrize("data", ALL_DATA_CLASSES, indirect=True)
def test_mask_sent_scalar_non_bool(data):
"""What happens if the mask is sent a scalar non-bool?"""
with pytest.raises(DataErr,
match="'mask' must be True, False, or a mask array"):
data.mask = "true"
def test_mask_sent_array_non_bool():
"""What happens if the mask is sent an array of non-bool?
Note that this succeeds, unlike the scalar non-bool case.
The test is only of the Data1DInt case as it is easier to
handle, rather than using ALL_DATA_CLASSES.
"""
data = Data1DInt(*DATA1DINT_ARGS)
mask = [1, 0, 1.0, 0.0, "true", "false", None, -23.0, {}, {"a"}]
expected = [True, False, True, False, True, True, False, True, False, True]
data.mask = mask
assert data.mask == pytest.approx(expected)
@pytest.mark.parametrize("data", ALL_DATA_CLASSES, indirect=True)
def test_mask_size_must_match(data):
"""Check if the mask can be set to the wrong length"""
with pytest.raises(DataErr,
match="^size mismatch between independent axis and mask: 100? vs 3$"):
data.mask = [1, 0, 1]
@pytest.mark.parametrize("data", (Data, ) + DATA_1D_CLASSES, indirect=True)
def test_reduce_axis_size_1d(data):
"""What happens if we reduce the independent axis?"""
nindep = len(data.indep)
for indep in data.indep:
assert len(indep) == 10
for attr in ["dep", "staterror", "syserror"]:
aval = getattr(data, attr)
assert len(aval) == 10
# Sanity checks.
#
for a, b in zip(data.indep, data.get_indep()):
assert numpy.all(a == b)
# Let's make the independent axis smaller.
#
smaller = []
for indep in data.indep:
smaller.append(indep[1:-1])
with pytest.raises(DataErr,
match="independent axis can not change size: 10 to 8"):
data.indep = tuple(smaller)
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_reduce_axis_size_2d(data):
"""What happens if we reduce the independent axis?
There is a shape attribute which could be changed, or
maybe should be changed.
"""
nindep = len(data.indep)
for indep in data.indep:
assert len(indep) == 100
for attr in ["dep", "staterror", "syserror"]:
aval = getattr(data, attr)
assert len(aval) == 100
assert data.shape == (10, 10)
# Sanity checks.
#
for a, b in zip(data.indep, data.get_indep()):
assert numpy.all(a == b)
# Let's make the independent axis smaller.
#
smaller = []
for indep in data.indep:
smaller.append(indep[1:-1])
with pytest.raises(DataErr,
match="independent axis can not change size: 100 to 98"):
data.indep = tuple(smaller)
@pytest.mark.parametrize("data", ALL_DATA_CLASSES, indirect=True)
@pytest.mark.parametrize("val,etype",
[(1, "int"),
([1, 2, 3], "list"),
(numpy.asarray([1, 2, 3]), "ndarray")
])
def test_invalid_independent_axis_not_a_tuple_set_indep(val, etype, data):
"""The independent axis must be a tuple: set_indep"""
with pytest.raises(TypeError,
match=f"independent axis must be sent a tuple, not {etype}"):
data.set_indep(val)
@pytest.mark.parametrize("data", ALL_DATA_CLASSES, indirect=True)
@pytest.mark.parametrize("val,etype",
[(1, "int"),
([1, 2, 3], "list"),
(numpy.asarray([1, 2, 3]), "ndarray")
])
def test_invalid_independent_axis_not_a_tuple_indep(val, etype, data):
"""The independent axis must be a tuple: .indep"""
with pytest.raises(TypeError,
match=f"independent axis must be sent a tuple, not {etype}"):
data.indep = val
@pytest.mark.parametrize("data", ALL_DATA_CLASSES, indirect=True)
def test_invalid_independent_axis(data):
"""What happens if we use the wrong number of independent axes?
We just duplicate the current axes.
"""
indep = data.indep
with pytest.raises(DataErr,
match="^data set 'data_test' sent wrong tuple size for the independent axis: [124] not [248]$"):
data.indep = tuple(list(indep) * 2)
@pytest.mark.parametrize("data", (Data1DInt, Data2D, Data2DInt), indirect=True)
def test_invalid_independent_axis_component_size(data):
"""What happens if we use mis-matched sizes?
It only makes sense to do this for data classes with
multiple components. We remove one entry from the
second component.
"""
indep = list(data.indep)
indep[1] = indep[1][:-1]
with pytest.raises(DataErr,
match=r"^size mismatch between (lo|x0) and (hi|x1): (10|100|99) vs (9|99|100)$"):
data.indep = tuple(indep)
@pytest.mark.parametrize("data", (Data1DInt, Data2D, Data2DInt), indirect=True)
def test_invalid_independent_axis_component_none(data):
"""What happens if we use mis-matched sizes (by setting one to None).
See test_invalid_independent_axis_component_size.
"""
indep = list(data.indep)
indep[1] = None
with pytest.raises(DataErr,
match=r"^size mismatch between (lo|x0) and (hi|x1): (10|100|None) vs (0|100|None)$"):
data.indep = tuple(indep)
@pytest.mark.parametrize("data", ALL_DATA_CLASSES, indirect=True)
def test_invalid_dependent_axis(data):
"""What happens if the dependent axis does not match the independent axis?
"""
with pytest.raises(DataErr,
match=r"^size mismatch between independent axis and y: 100? vs 9?8$"):
data.y = data.y[:-2]
@pytest.mark.parametrize("data_class", ALL_DATA_CLASSES)
def test_make_invalid_dependent_axis(data_class):
"""What happens if call constructor with invalid independent axis?
"""
# Take the correct arguments and reduce the independent axis by one.
# Use a copy of everything just in case.
args = []
for arg in INSTANCE_ARGS[data_class]:
if isinstance(arg, numpy.ndarray):
arg = arg.copy()
args.append(arg)
ypos = POS_Y_ARRAY[data_class]
args[ypos] = args[ypos][:-1]
with pytest.raises(DataErr,
match=r"^size mismatch between independent axis and y: 100? vs 9?9$"):
data_class(*args)
@pytest.mark.parametrize("data", ALL_DATA_CLASSES, indirect=True)
def test_set_independent_axis_to_none(data):
"""What happens if we clear the independent axis?"""
assert all(d is not None for d in data.indep)
indep = [None for d in data.indep]
with pytest.raises(DataErr,
match="independent axis can not be cleared"):
data.set_indep(tuple(indep))
@pytest.mark.parametrize("data", ALL_DATA_CLASSES, indirect=True)
@pytest.mark.parametrize("column", ["staterror", "syserror"])
def test_set_error_axis_wrong_length(data, column):
"""What happens if the column is set to the wrong length?"""
col = getattr(data, column)
assert col is not None
with pytest.raises(DataErr,
match=rf"^size mismatch between independent axis and {column}: (100?) vs 2$"):
setattr(data, column, [1, 2])
@pytest.mark.parametrize("column", ["y", "staterror", "syserror"])
def test_check_related_fields_correct_size(column):
"""If we set a related field before the independent axis, what happens if different?
I am just doing this for Data1D rather than trying to cover
all cases. There is a DataPHA version in
sherpa/astro/tests/test_astro_data2.py called
test_grouped_pha_check_related_fields_correct_size
"""
d = Data1D('example', None, None)
setattr(d, column, numpy.asarray([2, 10, 3]))
with pytest.raises(DataErr,
match="independent axis can not change size: 3 to 4"):
d.indep = (numpy.asarray([2, 3, 4, 5]), )
def test_data1d_mismatched_related_fields():
"""Check setting the related fields to different sizes: Data1D
This is a regression test to check when the mis-match is detected,
if it is. It is important that we have not set the dependent axis
here, as there is likely to be better support for checking the
dependent and independent axes than the related axes.
The assumption here is that we don't need to test all the classes.
"""
# Create an empty object, set the syserror and staterror fields to
# different lengths, then set the independent axis.
#
d = Data1D("x", None, None)
d.staterror = [1, 2, 3, 4]
with pytest.raises(DataErr,
match="size mismatch between independent axis and syserror: 4 vs 6"):
d.syserror = [2, 3, 4, 5, 20, 12]
@pytest.mark.parametrize("data", ALL_DATA_CLASSES, indirect=True)
def test_indep_must_be_1d(data):
"""Check that the indep data must be 1D.
Do we report an error because the dimensionality does not match,
or because the length check fails?
"""
indep = tuple([d.reshape(2, d.size // 2) for d in data.indep])
with pytest.raises(DataErr,
match="Array must be 1D"):
data.indep = indep
@pytest.mark.parametrize("data", ALL_DATA_CLASSES, indirect=True)
def test_dep_must_be_1d(data):
"""Check that the dependent data must be 1D."""
dep = data.dep.reshape(2, data.dep.size // 2)
with pytest.raises(DataErr,
match="Array must be 1D"):
data.set_dep(dep)
@pytest.mark.parametrize("data", ALL_DATA_CLASSES, indirect=True)
@pytest.mark.parametrize("column", ["staterror", "syserror"])
def test_error_must_be_1d(data, column):
"""Check that the error data must be 1D."""
errval = getattr(data, column)
errval = errval.reshape(2, errval.size // 2)
with pytest.raises(DataErr,
match="Array must be 1D"):
setattr(data, column, errval)
@pytest.mark.parametrize("data", DATA_1D_CLASSES, indirect=True)
@pytest.mark.parametrize("funcname", ["eval_model", "eval_model_to_fit"])
def test_data_eval_model_checks_dimensionality_1d(data, funcname):
"""Does eval_model check the model dimensionality?"""
model = Polynom2D()
func = getattr(data, funcname)
with pytest.raises(DataErr,
match="Data and model dimensionality do not match: 1D and 2D"):
func(model)
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
@pytest.mark.parametrize("funcname", ["eval_model", "eval_model_to_fit"])
def test_data_eval_model_checks_dimensionality_2d(data, funcname):
"""Does eval_model check the model dimensionality?"""
model = Polynom1D()
func = getattr(data, funcname)
with pytest.raises(DataErr,
match="Data and model dimensionality do not match: 2D and 1D"):
func(model)
def test_data1d_create_not_ndarray():
"""If sent non nd-array fields, does __init__ convert them?
This is a regression test.
"""
d = Data1D('x', [1, 2, 3], (4, 5, 6),
staterror=(8, 7, 6), syserror=[2, 3, 4])
assert isinstance(d.indep, tuple)
assert len(d.indep) == 1
assert isinstance(d.indep[0], numpy.ndarray)
assert isinstance(d.y, numpy.ndarray)
assert isinstance(d.staterror, numpy.ndarray)
assert isinstance(d.syserror, numpy.ndarray)
def test_data1dint_create_not_ndarray():
"""If sent non nd-array fields, does __init__ convert them?
This is a regression test.
"""
d = Data1DInt('x', [2, 3, 4], (2.5, 4.5, 4.8), (4, 5, 6),
staterror=(8, 7, 6), syserror=[2, 3, 4])
assert isinstance(d.indep, tuple)
assert len(d.indep) == 2
assert isinstance(d.indep[0], numpy.ndarray)
assert isinstance(d.indep[1], numpy.ndarray)
assert isinstance(d.y, numpy.ndarray)
assert isinstance(d.staterror, numpy.ndarray)
assert isinstance(d.syserror, numpy.ndarray)
def test_data2d_create_not_ndarray():
"""If sent non nd-array fields, does __init__ convert them?
This is a regression test.
"""
d = Data2D('x', [2, 3, 4], (15, 16, 17), (4, 5, 6),
staterror=(8, 7, 6), syserror=[2, 3, 4])
assert isinstance(d.indep, tuple)
assert len(d.indep) == 2
assert isinstance(d.indep[0], numpy.ndarray)
assert isinstance(d.indep[1], numpy.ndarray)
assert isinstance(d.y, numpy.ndarray)
assert isinstance(d.staterror, numpy.ndarray)
assert isinstance(d.syserror, numpy.ndarray)
def test_data2dint_create_not_ndarray():
"""If sent non nd-array fields, does __init__ convert them?
This is a regression test.
"""
d = Data2DInt('x', [2, 3, 4], (15, 16, 17),
(2.5, 4.5, 4.8), (16, 17, 18), (4, 5, 6),
staterror=(8, 7, 6), syserror=[2, 3, 4])
assert isinstance(d.indep, tuple)
assert len(d.indep) == 4
assert isinstance(d.indep[0], numpy.ndarray)
assert isinstance(d.indep[1], numpy.ndarray)
assert isinstance(d.indep[2], numpy.ndarray)
assert isinstance(d.indep[3], numpy.ndarray)
assert isinstance(d.y, numpy.ndarray)
assert isinstance(d.staterror, numpy.ndarray)
assert isinstance(d.syserror, numpy.ndarray)
@pytest.mark.parametrize("data", ALL_DATA_CLASSES, indirect=True)
@pytest.mark.parametrize("field", ["staterror", "syserror"])
def test_data_set_not_ndarray(data, field):
"""What happens if the field is set to a non-ndarray after creation?
This is a regression test.
"""
setattr(data, field, tuple([1] * len(data.y)))
got = getattr(data, field)
assert isinstance(got, numpy.ndarray)
@pytest.mark.parametrize("data", ALL_DATA_CLASSES, indirect=True)
def test_data_mask_set_not_ndarray(data):
"""What happens if the mask field is set to a non-ndarray after creation?
This is a regression test.
"""
data.mask = tuple([1] * len(data.y))
assert isinstance(data.mask, numpy.ndarray)
@pytest.mark.parametrize("data_class,args", EMPTY_DATA_OBJECTS)
def test_data_is_empty(data_class, args):
"""There is no size attribute"""
data = data_class("empty", *args)
assert data.size is None
@pytest.mark.parametrize("data", (Data, ) + DATA_1D_CLASSES, indirect=True)
def test_data_size_1d(data):
"""Check the size field.
This is separated into 1D and 2D cases as it is
easier to check given the existing test infrastructure.
"""
assert data.size == 10
@pytest.mark.parametrize("data", DATA_2D_CLASSES, indirect=True)
def test_data_size_2d(data):
"""Check the size field.
This is separated into 1D and 2D cases as it is
easier to check given the existing test infrastructure.
"""
assert data.size == 100
@pytest.mark.parametrize("data_class,args", EMPTY_DATA_OBJECTS)
def test_data_can_not_set_dep_to_scalar_when_empty(data_class, args):
"""Check out how we error out.
This is a regression test.
"""
data = data_class("empty", *args)
with pytest.raises(DataErr,
match="The size of 'empty' has not been set"):
data.set_dep(2)
@pytest.mark.parametrize("data_class,args", EMPTY_DATA_OBJECTS[2:])
@pytest.mark.parametrize("index", ["x0", "x1"])
def test_data_empty_get_x_2d(data_class, args, index):
"""What happens when there's no data?
This is a regression test.
"""
data = data_class("empty", *args)
getfunc = getattr(data, f"get_{index}")
assert getfunc() is None
@pytest.mark.parametrize("data_copy", ALL_DATA_CLASSES, indirect=True)
def test_data_change_independent_element(data_copy):
"""What happens if we change an element of the independent axis?"""
data = data_copy
# The x axis is > 0. but just check this
assert data.indep[0][1] > 0
# change the second element of the first component
with pytest.raises(ValueError,
match="assignment destination is read-only"):
data.indep[0][1] = -100
@pytest.mark.parametrize("data_copy", ALL_DATA_CLASSES, indirect=True)
def test_data_change_dependent_element(data_copy):
"""What happens if we change an element of the dependent axis?"""
data = data_copy
# just check we are changing the value
assert data.dep[1] > 0
expected = data.dep.copy()
expected[1] = -1000
# change the second element
data.dep[1] = -1000
# check we have only changed the one element
assert data.dep == pytest.approx(expected)
@pytest.mark.parametrize("data_copy", ALL_DATA_CLASSES, indirect=True)
@pytest.mark.parametrize("field", ["staterror", "syserror"])
def test_data_change_related_element(data_copy, field):
"""What happens if we change an element of a 'related' field?"""
data = data_copy
attr = getattr(data, field)
# just check we are changing the value
assert attr[1] < 500
expected = attr.copy()
expected[1] = 1000
# change the second element
attr[1] = 1000
# check we have only changed the one element
assert attr == pytest.approx(expected)
# Use the get_<field> call to check we have been changing things.
#
getfunc = getattr(data, f"get_{field}")
assert getfunc(filter=False) == pytest.approx(expected)
def test_data1d_do_we_copy_the_independent_axis():
"""Do we copy or just use the initial argument for the independent axis?
We could do this for all the data classes but it would be a bit
involved to set up.
This is a regression test.
"""
x = numpy.asarray([-100, 20, 45])
y = numpy.asarray([-13, 13, -12])
data = Data1D("change", x, y)
assert len(data.indep) == 1
assert data.indep[0] == pytest.approx(x)
# If an element of x is changed, does the independent axis change?
xorig = x.copy()
x[1] = -20
assert data.indep[0] == pytest.approx(xorig)
def test_data1d_do_we_copy_the_independent_axis_v2():
"""Do we copy or just use the initial argument for the independent axis?"""
x = numpy.asarray([-100, 20, 45])
y = numpy.asarray([-13, 13, -12])
data = Data1D("change", x, y)
with pytest.raises(ValueError,
match="assignment destination is read-only"):
data.indep[0][1] = -20
def test_data1d_do_we_copy_the_dependent_axis():
"""Do we copy or just use the initial argument for the dependent axis?
We could do this for all the data classes but it would be a bit
involved to set up.
This is a regression test.
"""
x = numpy.asarray([-100, 20, 45])
y = numpy.asarray([-13, 13, -12])
data = Data1D("change", x, y)
assert len(data.indep) == 1
assert data.y == pytest.approx(y)
# If an element of y is changed, does the dependent axis change?
y[1] = -20
assert data.y == pytest.approx(y)
def test_data1d_compare_mask_and_filter():
"""We can use ignore/notice or change the mask to get the same result"""
x = numpy.asarray([10, 20, 25, 30, 50])
y = x * 10
data = Data1D("ex", x, y)
assert data.mask
# Use notice/ignore
#
data.notice(15, 40)
data.ignore(23, 27)
assert data.mask == pytest.approx([0, 1, 0, 1, 0])
assert data.get_dep(filter=True) == pytest.approx([200, 300])
data.notice()
assert data.mask
# Change the mask array directly
data.mask = [0, 1, 0, 1, 0]
assert data.mask == pytest.approx([0, 1, 0, 1, 0])
assert data.get_dep(filter=True) == pytest.approx([200, 300])
# change an individual element
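# The array obtained from data.mask is not a copy here, so mutating it
# changes the filter in place (verified by the asserts below).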
mask = data.mask
mask[2] = True
assert data.mask == pytest.approx([0, 1, 1, 1, 0])
assert data.get_dep(filter=True) == pytest.approx([200, 250, 300])
| gpl-3.0 |
sshleifer/object_detection_kitti | attention_ocr/python/demo_inference.py | 2 | 3332 | """A script to run inference on a set of image files.
NOTE #1: The Attention OCR model was trained only using FSNS train dataset and
it will work only for images which look more or less similar to french street
names. In order to apply it to images from a different distribution you need
to retrain (or at least fine-tune) it using images from that distribution.
NOTE #2: This script exists for demo purposes only. It is highly recommended
to use tools and mechanisms provided by the TensorFlow Serving system to run
inference on TensorFlow models in production:
https://www.tensorflow.org/serving/serving_basic
Usage:
python demo_inference.py --batch_size=32 \
--image_path_pattern=./datasets/data/fsns/temp/fsns_train_%02d.png
"""
import numpy as np
import PIL.Image
import tensorflow as tf
from tensorflow.python.platform import flags
import common_flags
import datasets
import model as attention_ocr
FLAGS = flags.FLAGS
common_flags.define()
# e.g. ./datasets/data/fsns/temp/fsns_train_%02d.png
flags.DEFINE_string('image_path_pattern', '',
'A file pattern with a placeholder for the image index.')
def get_dataset_image_size(dataset_name):
# Ideally this info should be exposed through the dataset interface itself.
# But currently it is not available by other means.
ds_module = getattr(datasets, dataset_name)
height, width, _ = ds_module.DEFAULT_CONFIG['image_shape']
return width, height
def load_images(file_pattern, batch_size, dataset_name):
width, height = get_dataset_image_size(dataset_name)
images_actual_data = np.ndarray(shape=(batch_size, height, width, 3),
dtype='float32')
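# Note: no resizing is done here, so each input image is assumed to
# already match the dataset's expected (height, width, 3) shape.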
for i in range(batch_size):
path = file_pattern % i
print("Reading %s" % path)
pil_image = PIL.Image.open(tf.gfile.GFile(path))
images_actual_data[i, ...] = np.asarray(pil_image)
return images_actual_data
def load_model(checkpoint, batch_size, dataset_name):
width, height = get_dataset_image_size(dataset_name)
dataset = common_flags.create_dataset(split_name=FLAGS.split_name)
model = common_flags.create_model(
num_char_classes=dataset.num_char_classes,
seq_length=dataset.max_sequence_length,
num_views=dataset.num_of_views,
null_code=dataset.null_code,
charset=dataset.charset)
images_placeholder = tf.placeholder(tf.float32,
shape=[batch_size, height, width, 3])
endpoints = model.create_base(images_placeholder, labels_one_hot=None)
init_fn = model.create_init_fn_to_restore(checkpoint)
return images_placeholder, endpoints, init_fn
def main(_):
images_placeholder, endpoints, init_fn = load_model(FLAGS.checkpoint,
FLAGS.batch_size,
FLAGS.dataset_name)
images_data = load_images(FLAGS.image_path_pattern, FLAGS.batch_size,
FLAGS.dataset_name)
with tf.Session() as sess:
tf.tables_initializer().run() # required by the CharsetMapper
init_fn(sess)
predictions = sess.run(endpoints.predicted_text,
feed_dict={images_placeholder: images_data})
print("Predicted strings:")
for line in predictions:
print(line)
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
pianomania/scikit-learn | sklearn/ensemble/__init__.py | 145 | 1382 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification, regression and anomaly detection.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .iforest import IsolationForest
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "IsolationForest", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
giorgiop/scikit-learn | sklearn/ensemble/__init__.py | 145 | 1382 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification, regression and anomaly detection.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .iforest import IsolationForest
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "IsolationForest", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
ray-project/ray | rllib/algorithms/ars/ars.py | 1 | 22819 | # Code in this file is copied and adapted from
# https://github.com/openai/evolution-strategies-starter and from
# https://github.com/modestyachts/ARS
from collections import namedtuple
import logging
import numpy as np
import random
import time
from typing import Optional
import ray
from ray.rllib.algorithms import Algorithm
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig, NotProvided
from ray.rllib.algorithms.ars.ars_tf_policy import ARSTFPolicy
from ray.rllib.algorithms.es import optimizers, utils
from ray.rllib.algorithms.es.es_tf_policy import rollout
from ray.rllib.env.env_context import EnvContext
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.utils import FilterManager
from ray.rllib.utils.actor_manager import FaultAwareApply
from ray.rllib.utils.annotations import override
from ray.rllib.utils.deprecation import Deprecated
from ray.rllib.utils.metrics import (
NUM_AGENT_STEPS_SAMPLED,
NUM_AGENT_STEPS_TRAINED,
NUM_ENV_STEPS_SAMPLED,
NUM_ENV_STEPS_TRAINED,
)
from ray.rllib.utils.torch_utils import set_torch_seed
logger = logging.getLogger(__name__)
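# Each rollout worker reports its work as a Result: for every sampled
# noise direction it records the index into the shared noise table, the
# (positive, negative) perturbation returns and episode lengths, plus the
# returns/lengths of any unperturbed evaluation rollouts.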
Result = namedtuple(
"Result",
[
"noise_indices",
"noisy_returns",
"sign_noisy_returns",
"noisy_lengths",
"eval_returns",
"eval_lengths",
],
)
class ARSConfig(AlgorithmConfig):
"""Defines a configuration class from which an ARS Algorithm can be built.
Example:
>>> from ray.rllib.algorithms.ars import ARSConfig
>>> config = ARSConfig() # doctest: +SKIP
>>> config = config.training(report_length=20) # doctest: +SKIP
>>> config = config.resources(num_gpus=0) # doctest: +SKIP
>>> config = config.rollouts(num_rollout_workers=4) # doctest: +SKIP
>>> config = config.environment("CartPole-v1") # doctest: +SKIP
>>> print(config.to_dict()) # doctest: +SKIP
>>> # Build a Algorithm object from the config and run 1 training iteration.
>>> algo = config.build() # doctest: +SKIP
>>> algo.train() # doctest: +SKIP
Example:
>>> from ray.rllib.algorithms.ars import ARSConfig
>>> from ray import air
>>> from ray import tune
>>> config = ARSConfig()
>>> # Print out some default values.
>>> print(config.action_noise_std) # doctest: +SKIP
>>> # Update the config object.
>>> config = config.training( # doctest: +SKIP
... rollouts_used=tune.grid_search([32, 64]), eval_prob=0.5)
>>> # Set the config object's env.
>>> config = config.environment(env="CartPole-v1") # doctest: +SKIP
>>> # Use to_dict() to get the old-style python config dict
>>> # when running with tune.
>>> tune.Tuner( # doctest: +SKIP
... "ARS",
... run_config=air.RunConfig(stop={"episode_reward_mean": 200}),
... param_space=config.to_dict(),
... ).fit()
"""
def __init__(self):
"""Initializes a ARSConfig instance."""
super().__init__(algo_class=ARS)
# fmt: off
# __sphinx_doc_begin__
# ARS specific settings:
self.action_noise_std = 0.0
self.noise_stdev = 0.02
self.num_rollouts = 32
self.rollouts_used = 32
self.sgd_stepsize = 0.01
self.noise_size = 250000000
self.eval_prob = 0.03
self.report_length = 10
self.offset = 0
self.tf_single_threaded = True
# Override some of AlgorithmConfig's default values with ARS-specific values.
self.num_rollout_workers = 2
self.observation_filter = "MeanStdFilter"
# ARS will use Algorithm's evaluation WorkerSet (if evaluation_interval > 0).
# Therefore, we must be careful not to use more than 1 env per eval worker
# (would break ARSPolicy's compute_single_action method) and to not do
# obs-filtering.
self.evaluation(
evaluation_config={
"num_envs_per_worker": 1,
"observation_filter": "NoFilter",
}
)
# __sphinx_doc_end__
# fmt: on
@override(AlgorithmConfig)
def training(
self,
*,
action_noise_std: Optional[float] = NotProvided,
noise_stdev: Optional[float] = NotProvided,
num_rollouts: Optional[int] = NotProvided,
rollouts_used: Optional[int] = NotProvided,
sgd_stepsize: Optional[float] = NotProvided,
noise_size: Optional[int] = NotProvided,
eval_prob: Optional[float] = NotProvided,
report_length: Optional[int] = NotProvided,
offset: Optional[int] = NotProvided,
tf_single_threaded: Optional[bool] = NotProvided,
**kwargs,
) -> "ARSConfig":
"""Sets the training related configuration.
Args:
action_noise_std: Std. deviation to be used when adding (standard normal)
noise to computed actions. Action noise is only added, if
`compute_actions` is called with the `add_noise` arg set to True.
noise_stdev: Std. deviation of parameter noise.
num_rollouts: Number of perturbs to try.
rollouts_used: Number of perturbs to keep in gradient estimate.
sgd_stepsize: SGD step-size used for the Adam optimizer.
noise_size: Number of rows in the noise table (shared across workers).
Each row contains a gaussian noise value for each model parameter.
eval_prob: Probability of evaluating the parameter rewards.
report_length: How many of the last rewards we average over.
offset: Value to subtract from the reward (e.g. survival bonus
from humanoid) during rollouts.
tf_single_threaded: Whether the tf-session should be generated without any
parallelism options.
Returns:
This updated AlgorithmConfig object.
"""
# Pass kwargs onto super's `training()` method.
super().training(**kwargs)
if action_noise_std is not NotProvided:
self.action_noise_std = action_noise_std
if noise_stdev is not NotProvided:
self.noise_stdev = noise_stdev
if num_rollouts is not NotProvided:
self.num_rollouts = num_rollouts
if rollouts_used is not NotProvided:
self.rollouts_used = rollouts_used
if sgd_stepsize is not NotProvided:
self.sgd_stepsize = sgd_stepsize
if noise_size is not NotProvided:
self.noise_size = noise_size
if eval_prob is not NotProvided:
self.eval_prob = eval_prob
if report_length is not NotProvided:
self.report_length = report_length
if offset is not NotProvided:
self.offset = offset
if tf_single_threaded is not NotProvided:
self.tf_single_threaded = tf_single_threaded
return self
@override(AlgorithmConfig)
def validate(self) -> None:
# Call super's validation method.
super().validate()
if self.num_gpus > 1:
raise ValueError("`num_gpus` > 1 not yet supported for ARS!")
if self.num_rollout_workers <= 0:
raise ValueError("`num_rollout_workers` must be > 0 for ARS!")
if (
self.evaluation_config is not None
and self.evaluation_config.get("num_envs_per_worker") != 1
):
raise ValueError(
"`evaluation_config.num_envs_per_worker` must always be 1 for "
"ARS! To parallelize evaluation, increase "
"`evaluation_num_workers` to > 1."
)
if (
self.evaluation_config is not None
and self.evaluation_config.get("observation_filter") != "NoFilter"
):
raise ValueError(
"`evaluation_config.observation_filter` must always be "
"`NoFilter` for ARS!"
)
@ray.remote
def create_shared_noise(count):
"""Create a large array of noise to be shared by all workers."""
seed = 123
noise = np.random.RandomState(seed).randn(count).astype(np.float32)
return noise
class SharedNoiseTable:
def __init__(self, noise):
self.noise = noise
assert self.noise.dtype == np.float32
def get(self, i, dim):
return self.noise[i : i + dim]
def sample_index(self, dim):
return np.random.randint(0, len(self.noise) - dim + 1)
def get_delta(self, dim):
idx = self.sample_index(dim)
return idx, self.get(idx, dim)
@ray.remote(max_restarts=-1)
class Worker(FaultAwareApply):
def __init__(
self,
config: AlgorithmConfig,
env_creator,
noise,
worker_index,
min_task_runtime=0.2,
):
# Set Python random, numpy, env, and torch/tf seeds.
seed = config.seed
if seed is not None:
# Python random module.
random.seed(seed)
# Numpy.
np.random.seed(seed)
# Torch.
if config.framework_str == "torch":
set_torch_seed(seed)
self.min_task_runtime = min_task_runtime
self.config = config
self.noise = SharedNoiseTable(noise)
env_context = EnvContext(self.config.env_config, worker_index)
self.env = env_creator(env_context)
# Seed the env, if gym.Env.
if not hasattr(self.env, "seed"):
logger.info("Env doesn't support env.seed(): {}".format(self.env))
# Gym.env.
else:
self.env.seed(seed)
from ray.rllib import models
self.preprocessor = models.ModelCatalog.get_preprocessor(self.env)
policy_cls = get_policy_class(self.config)
self.policy = policy_cls(
self.env.observation_space, self.env.action_space, config.to_dict()
)
@property
def filters(self):
return {DEFAULT_POLICY_ID: self.policy.observation_filter}
def sync_filters(self, new_filters):
for k in self.filters:
self.filters[k].sync(new_filters[k])
def get_filters(self, flush_after=False):
return_filters = {}
for k, f in self.filters.items():
return_filters[k] = f.as_serializable()
if flush_after:
f.reset_buffer()
return return_filters
def rollout(self, timestep_limit, add_noise=False):
rollout_rewards, rollout_fragment_length = rollout(
self.policy,
self.env,
timestep_limit=timestep_limit,
add_noise=add_noise,
offset=self.config.offset,
)
return rollout_rewards, rollout_fragment_length
def do_rollouts(self, params, timestep_limit=None):
# Set the network weights.
self.policy.set_flat_weights(params)
noise_indices, returns, sign_returns, lengths = [], [], [], []
eval_returns, eval_lengths = [], []
# Perform some rollouts with noise.
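# Keep sampling until at least one perturbed (positive, negative) pair
# has been collected, so a call cannot return only evaluation rollouts.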
while len(noise_indices) == 0:
if np.random.uniform() < self.config.eval_prob:
# Do an evaluation run with no perturbation.
self.policy.set_flat_weights(params)
rewards, length = self.rollout(timestep_limit, add_noise=False)
eval_returns.append(rewards.sum())
eval_lengths.append(length)
else:
# Do a regular run with parameter perturbations.
noise_index = self.noise.sample_index(self.policy.num_params)
perturbation = self.config.noise_stdev * self.noise.get(
noise_index, self.policy.num_params
)
# These two sampling steps could be done in parallel on
# different actors letting us update twice as frequently.
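# Mirrored (antithetic) sampling: the same noise vector is applied with
# both signs and the return of each signed rollout is recorded.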
self.policy.set_flat_weights(params + perturbation)
rewards_pos, lengths_pos = self.rollout(timestep_limit)
self.policy.set_flat_weights(params - perturbation)
rewards_neg, lengths_neg = self.rollout(timestep_limit)
noise_indices.append(noise_index)
returns.append([rewards_pos.sum(), rewards_neg.sum()])
sign_returns.append(
[np.sign(rewards_pos).sum(), np.sign(rewards_neg).sum()]
)
lengths.append([lengths_pos, lengths_neg])
return Result(
noise_indices=noise_indices,
noisy_returns=returns,
sign_noisy_returns=sign_returns,
noisy_lengths=lengths,
eval_returns=eval_returns,
eval_lengths=eval_lengths,
)
def get_policy_class(config: AlgorithmConfig):
if config.framework_str == "torch":
from ray.rllib.algorithms.ars.ars_torch_policy import ARSTorchPolicy
policy_cls = ARSTorchPolicy
else:
policy_cls = ARSTFPolicy
return policy_cls
class ARS(Algorithm):
"""Large-scale implementation of Augmented Random Search in Ray."""
@classmethod
@override(Algorithm)
def get_default_config(cls) -> AlgorithmConfig:
return ARSConfig()
@override(Algorithm)
def setup(self, config: AlgorithmConfig):
# Setup our config: Merge the user-supplied config (which could
# be a partial config dict with the class' default).
if isinstance(config, dict):
self.config = self.get_default_config().update_from_dict(config)
# Validate our config dict.
self.config.validate()
# Generate the local env.
env_context = EnvContext(self.config.env_config or {}, worker_index=0)
env = self.env_creator(env_context)
self.callbacks = self.config.callbacks_class()
self._policy_class = get_policy_class(self.config)
self.policy = self._policy_class(
env.observation_space, env.action_space, self.config.to_dict()
)
self.optimizer = optimizers.SGD(self.policy, self.config.sgd_stepsize)
self.rollouts_used = self.config.rollouts_used
self.num_rollouts = self.config.num_rollouts
self.report_length = self.config.report_length
# Create the shared noise table.
logger.info("Creating shared noise table.")
noise_id = create_shared_noise.remote(self.config.noise_size)
self.noise = SharedNoiseTable(ray.get(noise_id))
# Create the actors.
logger.info("Creating actors.")
remote_workers = [
Worker.remote(self.config, self.env_creator, noise_id, idx + 1)
for idx in range(self.config.num_rollout_workers)
]
self.workers = WorkerSet._from_existing(
local_worker=None,
remote_workers=remote_workers,
)
self.episodes_so_far = 0
self.reward_list = []
self.tstart = time.time()
@override(Algorithm)
def get_policy(self, policy=DEFAULT_POLICY_ID):
if policy != DEFAULT_POLICY_ID:
raise ValueError(
"ARS has no policy '{}'! Use {} "
"instead.".format(policy, DEFAULT_POLICY_ID)
)
return self.policy
@override(Algorithm)
def step(self):
config = self.config
theta = self.policy.get_flat_weights()
assert theta.dtype == np.float32
assert len(theta.shape) == 1
# Put the current policy weights in the object store.
theta_id = ray.put(theta)
# Use the actors to do rollouts, note that we pass in the ID of the
# policy weights.
results, num_episodes, num_timesteps = self._collect_results(
theta_id, config["num_rollouts"]
)
# Update our sample steps counters.
self._counters[NUM_AGENT_STEPS_SAMPLED] += num_timesteps
self._counters[NUM_ENV_STEPS_SAMPLED] += num_timesteps
all_noise_indices = []
all_training_returns = []
all_training_lengths = []
all_eval_returns = []
all_eval_lengths = []
# Loop over the results.
for result in results:
all_eval_returns += result.eval_returns
all_eval_lengths += result.eval_lengths
all_noise_indices += result.noise_indices
all_training_returns += result.noisy_returns
all_training_lengths += result.noisy_lengths
assert len(all_eval_returns) == len(all_eval_lengths)
assert (
len(all_noise_indices)
== len(all_training_returns)
== len(all_training_lengths)
)
self.episodes_so_far += num_episodes
# Assemble the results.
eval_returns = np.array(all_eval_returns)
eval_lengths = np.array(all_eval_lengths)
noise_indices = np.array(all_noise_indices)
noisy_returns = np.array(all_training_returns)
noisy_lengths = np.array(all_training_lengths)
# keep only the best returns
# select top performing directions if rollouts_used < num_rollouts
max_rewards = np.max(noisy_returns, axis=1)
if self.rollouts_used > self.num_rollouts:
self.rollouts_used = self.num_rollouts
percentile = 100 * (1 - (self.rollouts_used / self.num_rollouts))
idx = np.arange(max_rewards.size)[
max_rewards >= np.percentile(max_rewards, percentile)
]
noise_idx = noise_indices[idx]
noisy_returns = noisy_returns[idx, :]
# Compute and take a step.
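# ARS gradient estimate (sketch): average (R+ - R-) * noise over the
# selected top directions, i.e. g ~ sum_k (R_k+ - R_k-) * delta_k / N,
# then rescale by the standard deviation of the returns below.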
g, count = utils.batched_weighted_sum(
noisy_returns[:, 0] - noisy_returns[:, 1],
(self.noise.get(index, self.policy.num_params) for index in noise_idx),
batch_size=min(500, noisy_returns[:, 0].size),
)
g /= noise_idx.size
# scale the returns by their standard deviation
if not np.isclose(np.std(noisy_returns), 0.0):
g /= np.std(noisy_returns)
assert g.shape == (self.policy.num_params,) and g.dtype == np.float32
# Compute the new weights theta.
theta, update_ratio = self.optimizer.update(-g)
# Update our train steps counters.
self._counters[NUM_AGENT_STEPS_TRAINED] += num_timesteps
self._counters[NUM_ENV_STEPS_TRAINED] += num_timesteps
# Set the new weights in the local copy of the policy.
self.policy.set_flat_weights(theta)
# update the reward list
if len(all_eval_returns) > 0:
self.reward_list.append(eval_returns.mean())
# Bring restored workers back if necessary.
self.restore_workers(self.workers)
# Now sync the filters
FilterManager.synchronize(
{DEFAULT_POLICY_ID: self.policy.observation_filter}, self.workers
)
info = {
"weights_norm": np.square(theta).sum(),
"weights_std": np.std(theta),
"grad_norm": np.square(g).sum(),
"update_ratio": update_ratio,
"episodes_this_iter": noisy_lengths.size,
"episodes_so_far": self.episodes_so_far,
}
result = dict(
episode_reward_mean=np.mean(self.reward_list[-self.report_length :]),
episode_len_mean=eval_lengths.mean(),
timesteps_this_iter=noisy_lengths.sum(),
info=info,
)
return result
@override(Algorithm)
def cleanup(self):
self.workers.stop()
@override(Algorithm)
def restore_workers(self, workers: WorkerSet):
restored = self.workers.probe_unhealthy_workers()
if restored:
self._sync_weights_to_workers(worker_set=self.workers, worker_ids=restored)
@override(Algorithm)
def compute_single_action(self, observation, *args, **kwargs):
action, _, _ = self.policy.compute_actions([observation], update=True)
if kwargs.get("full_fetch"):
return action[0], [], {}
return action[0]
@override(Algorithm)
def _sync_weights_to_workers(self, *, worker_set=None, worker_ids=None):
# Broadcast the new policy weights to all evaluation workers.
assert worker_set is not None
logger.info("Synchronizing weights to evaluation workers.")
weights = ray.put(self.policy.get_flat_weights())
worker_set.foreach_worker(
lambda w: w.foreach_policy(
lambda p, _: p.set_flat_weights(ray.get(weights))
),
local_worker=False,
remote_worker_ids=worker_ids,
)
def _collect_results(self, theta_id, min_episodes):
num_episodes, num_timesteps = 0, 0
results = []
while num_episodes < min_episodes:
logger.debug(
"Collected {} episodes {} timesteps so far this iter".format(
num_episodes, num_timesteps
)
)
rollout_ids = self.workers.foreach_worker(
func=lambda w: w.do_rollouts(ray.get(theta_id)),
local_worker=False,
)
# Get the results of the rollouts.
for result in rollout_ids:
results.append(result)
# Update the number of episodes and the number of timesteps
# keeping in mind that result.noisy_lengths is a list of lists,
# where the inner lists have length 2.
num_episodes += sum(len(pair) for pair in result.noisy_lengths)
num_timesteps += sum(sum(pair) for pair in result.noisy_lengths)
return results, num_episodes, num_timesteps
def __getstate__(self):
return {
"weights": self.policy.get_flat_weights(),
"filter": self.policy.observation_filter,
"episodes_so_far": self.episodes_so_far,
}
def __setstate__(self, state):
self.episodes_so_far = state["episodes_so_far"]
self.policy.set_flat_weights(state["weights"])
self.policy.observation_filter = state["filter"]
FilterManager.synchronize(
{DEFAULT_POLICY_ID: self.policy.observation_filter}, self.workers
)
# Deprecated: Use ray.rllib.algorithms.ars.ARSConfig instead!
class _deprecated_default_config(dict):
def __init__(self):
super().__init__(ARSConfig().to_dict())
@Deprecated(
old="ray.rllib.algorithms.ars.ars.DEFAULT_CONFIG",
new="ray.rllib.algorithms.ars.ars.ARSConfig(...)",
error=True,
)
def __getitem__(self, item):
return super().__getitem__(item)
DEFAULT_CONFIG = _deprecated_default_config()
| apache-2.0 |
ray-project/ray | python/ray/tests/lightgbm/simple_tune.py | 1 | 2517 | from sklearn import datasets
from sklearn.model_selection import train_test_split
from lightgbm_ray import RayDMatrix, RayParams, train
# __train_begin__
num_cpus_per_actor = 2
num_actors = 1
def train_model(config):
# Load dataset
data, labels = datasets.load_breast_cancer(return_X_y=True)
# Split into train and test set
train_x, test_x, train_y, test_y = train_test_split(data, labels, test_size=0.25)
train_set = RayDMatrix(train_x, train_y)
test_set = RayDMatrix(test_x, test_y)
evals_result = {}
bst = train(
params=config,
dtrain=train_set,
evals=[(test_set, "eval")],
evals_result=evals_result,
verbose_eval=False,
ray_params=RayParams(num_actors=num_actors, cpus_per_actor=num_cpus_per_actor),
)
bst.booster_.save_model("model.lgbm")
# __train_end__
# __load_begin__
def load_best_model(best_logdir):
import lightgbm as lgbm
import os
best_bst = lgbm.Booster(model_file=os.path.join(best_logdir, "model.lgbm"))
return best_bst
# __load_end__
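# Illustrative use of load_best_model (hypothetical path/variables), assuming a
# finished tune run whose best trial directory contains "model.lgbm":
#   best_bst = load_best_model("/path/to/best_trial_logdir")
#   preds = best_bst.predict(test_x)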
def main():
# __tune_begin__
from ray import tune
# Set config
config = {
"objective": "binary",
"metric": ["binary_logloss", "binary_error"],
"eta": tune.loguniform(1e-4, 1e-1),
"subsample": tune.uniform(0.5, 1.0),
"max_depth": tune.randint(1, 9),
}
# __tune_end__
# __tune_run_begin__
analysis = tune.run(
train_model,
config=config,
metric="eval-binary_error",
mode="min",
num_samples=4,
resources_per_trial=RayParams(
num_actors=num_actors, cpus_per_actor=num_cpus_per_actor
).get_tune_resources(),
)
# Load in the best performing model.
best_bst = load_best_model(analysis.best_logdir)
# Use the following code block instead if using Ray Client.
# import ray
# if ray.util.client.ray.is_connected():
# # If using Ray Client best_logdir is a directory on the server.
# # So we want to make sure we wrap model loading in a task.
# remote_load_fn = ray.remote(load_best_model)
# best_bst = ray.get(remote_load_fn.remote(analysis.best_logdir))
# Do something with the best model.
_ = best_bst
accuracy = 1.0 - analysis.best_result["eval-binary_error"]
print(f"Best model parameters: {analysis.best_config}")
print(f"Best model total accuracy: {accuracy:.4f}")
# __tune_run_end__
if __name__ == "__main__":
main()
| apache-2.0 |
johanvdw/Fiona | fiona/fio/cat.py | 1 | 20823 | from functools import partial
import json
import logging
import sys
import click
import fiona
from fiona.transform import transform_geom
from fiona.fio.cli import cli, obj_gen
def make_ld_context(context_items):
"""Returns a JSON-LD Context object.
See http://json-ld.org/spec/latest/json-ld."""
ctx = {
"@context": {
"geojson": "http://ld.geojson.org/vocab#",
"Feature": "geojson:Feature",
"FeatureCollection": "geojson:FeatureCollection",
"GeometryCollection": "geojson:GeometryCollection",
"LineString": "geojson:LineString",
"MultiLineString": "geojson:MultiLineString",
"MultiPoint": "geojson:MultiPoint",
"MultiPolygon": "geojson:MultiPolygon",
"Point": "geojson:Point",
"Polygon": "geojson:Polygon",
"bbox": {
"@container": "@list",
"@id": "geojson:bbox"
},
"coordinates": "geojson:coordinates",
"datetime": "http://www.w3.org/2006/time#inXSDDateTime",
"description": "http://purl.org/dc/terms/description",
"features": {
"@container": "@set",
"@id": "geojson:features"
},
"geometry": "geojson:geometry",
"id": "@id",
"properties": "geojson:properties",
"start": "http://www.w3.org/2006/time#hasBeginning",
"stop": "http://www.w3.org/2006/time#hasEnding",
"title": "http://purl.org/dc/terms/title",
"type": "@type",
"when": "geojson:when"
}
}
for item in context_items or []:
t, uri = item.split("=")
ctx[t.strip()] = uri.strip()
return ctx
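# For example, make_ld_context(["dc=http://purl.org/dc/terms/"]) adds the entry
# {'dc': 'http://purl.org/dc/terms/'} next to the default "@context" key.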
def id_record(rec):
"""Converts a record's id to a blank node id and returns the record."""
rec['id'] = '_:f%s' % rec['id']
return rec
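# For example, id_record({'id': '12', ...}) rewrites the id to '_:f12', i.e. a
# JSON-LD blank node identifier.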
# Cat command
@cli.command(short_help="Concatenate and print the features of datasets")
# One or more files.
@click.argument('input', nargs=-1, type=click.Path(exists=True))
# Coordinate precision option.
@click.option('--precision', type=int, default=-1, metavar="N",
help="Decimal precision of coordinates.")
@click.option('--indent', default=None, type=int, metavar="N",
help="Indentation level for pretty printed output.")
@click.option('--compact/--no-compact', default=False,
help="Use compact separators (',', ':').")
@click.option('--ignore-errors/--no-ignore-errors', default=False,
help="log errors but do not stop serialization.")
@click.option('--dst_crs', default=None, metavar="EPSG:NNNN",
help="Destination CRS.")
# Use ASCII RS control code to signal a sequence item (False is default).
# See http://tools.ietf.org/html/draft-ietf-json-text-sequence-05.
# Experimental.
@click.option('--x-json-seq-rs/--x-json-seq-no-rs', default=True,
help="Use RS as text separator instead of LF. Experimental.")
@click.option('--bbox', default=None, metavar="w,s,e,n",
help="filter for features intersecting a bounding box")
@click.pass_context
def cat(ctx, input, precision, indent, compact, ignore_errors, dst_crs,
x_json_seq_rs, bbox):
"""Concatenate and print the features of input datasets as a
sequence of GeoJSON features."""
verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
logger = logging.getLogger('fio')
sink = click.get_text_stream('stdout')
dump_kwds = {'sort_keys': True}
if indent:
dump_kwds['indent'] = indent
if compact:
dump_kwds['separators'] = (',', ':')
item_sep = compact and ',' or ', '
try:
with fiona.drivers(CPL_DEBUG=verbosity>2):
for path in input:
with fiona.open(path) as src:
if bbox:
bbox = tuple(map(float, bbox.split(',')))
for i, feat in src.items(bbox=bbox):
if dst_crs or precision > 0:
g = transform_geom(
src.crs, dst_crs, feat['geometry'],
antimeridian_cutting=True,
precision=precision)
feat['geometry'] = g
feat['bbox'] = fiona.bounds(g)
if x_json_seq_rs:
sink.write(u'\u001e')
json.dump(feat, sink, **dump_kwds)
sink.write("\n")
sys.exit(0)
except Exception:
logger.exception("Failed. Exception caught")
sys.exit(1)
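# Illustrative invocation (assuming the "fio" entry point provided by fiona.fio.cli):
#   $ fio cat input.shp --dst_crs EPSG:4326 --precision 6
# prints one RS-prefixed GeoJSON feature per line to stdout.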
# Collect command
@cli.command(short_help="Collect a sequence of features.")
# Coordinate precision option.
@click.option('--precision', type=int, default=-1, metavar="N",
help="Decimal precision of coordinates.")
@click.option('--indent', default=None, type=int, metavar="N",
help="Indentation level for pretty printed output.")
@click.option('--compact/--no-compact', default=False,
help="Use compact separators (',', ':').")
@click.option('--record-buffered/--no-record-buffered', default=False,
help="Economical buffering of writes at record, not collection "
"(default), level.")
@click.option('--ignore-errors/--no-ignore-errors', default=False,
help="log errors but do not stop serialization.")
@click.option('--src_crs', default=None, metavar="EPSG:NNNN",
help="Source CRS.")
@click.option('--with-ld-context/--without-ld-context', default=False,
help="add a JSON-LD context to JSON output.")
@click.option('--add-ld-context-item', multiple=True,
help="map a term to a URI and add it to the output's JSON LD context.")
@click.pass_context
def collect(ctx, precision, indent, compact, record_buffered, ignore_errors,
src_crs, with_ld_context, add_ld_context_item):
"""Make a GeoJSON feature collection from a sequence of GeoJSON
features and print it."""
verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
logger = logging.getLogger('fio')
stdin = click.get_text_stream('stdin')
sink = click.get_text_stream('stdout')
dump_kwds = {'sort_keys': True}
if indent:
dump_kwds['indent'] = indent
if compact:
dump_kwds['separators'] = (',', ':')
item_sep = compact and ',' or ', '
if src_crs:
transformer = partial(transform_geom, src_crs, 'EPSG:4326',
antimeridian_cutting=True, precision=precision)
else:
transformer = lambda x: x
first_line = next(stdin)
# If input is RS-delimited JSON sequence.
if first_line.startswith(u'\x1e'):
def feature_gen():
buffer = first_line.strip(u'\x1e')
for line in stdin:
if line.startswith(u'\x1e'):
if buffer:
feat = json.loads(buffer)
feat['geometry'] = transformer(feat['geometry'])
yield feat
buffer = line.strip(u'\x1e')
else:
buffer += line
else:
feat = json.loads(buffer)
feat['geometry'] = transformer(feat['geometry'])
yield feat
else:
def feature_gen():
feat = json.loads(first_line)
feat['geometry'] = transformer(feat['geometry'])
yield feat
for line in stdin:
feat = json.loads(line)
feat['geometry'] = transformer(feat['geometry'])
yield feat
try:
source = feature_gen()
if record_buffered:
# Buffer GeoJSON data at the feature level for smaller
# memory footprint.
indented = bool(indent)
rec_indent = "\n" + " " * (2 * (indent or 0))
collection = {
'type': 'FeatureCollection',
'features': [] }
if with_ld_context:
collection['@context'] = make_ld_context(
add_ld_context_item)
head, tail = json.dumps(collection, **dump_kwds).split('[]')
sink.write(head)
sink.write("[")
# Try the first record.
try:
i, first = 0, next(source)
if with_ld_context:
first = id_record(first)
if indented:
sink.write(rec_indent)
sink.write(
json.dumps(first, **dump_kwds
).replace("\n", rec_indent))
except StopIteration:
pass
except Exception as exc:
# Ignoring errors is *not* the default.
if ignore_errors:
logger.error(
"failed to serialize file record %d (%s), "
"continuing",
i, exc)
else:
# Log error and close up the GeoJSON, leaving it
# more or less valid no matter what happens above.
                    logger.critical(
                        "failed to serialize file record %d (%s), "
                        "quitting",
i, exc)
sink.write("]")
sink.write(tail)
if indented:
sink.write("\n")
raise
# Because trailing commas aren't valid in JSON arrays
# we'll write the item separator before each of the
# remaining features.
for i, rec in enumerate(source, 1):
try:
if with_ld_context:
rec = id_record(rec)
if indented:
sink.write(rec_indent)
sink.write(item_sep)
sink.write(
json.dumps(rec, **dump_kwds
).replace("\n", rec_indent))
except Exception as exc:
if ignore_errors:
logger.error(
"failed to serialize file record %d (%s), "
"continuing",
i, exc)
else:
                        logger.critical(
                            "failed to serialize file record %d (%s), "
                            "quitting",
i, exc)
sink.write("]")
sink.write(tail)
if indented:
sink.write("\n")
raise
# Close up the GeoJSON after writing all features.
sink.write("]")
sink.write(tail)
if indented:
sink.write("\n")
else:
# Buffer GeoJSON data at the collection level. The default.
collection = {'type': 'FeatureCollection'}
if with_ld_context:
collection['@context'] = make_ld_context(
add_ld_context_item)
collection['features'] = [
id_record(rec) for rec in source]
else:
collection['features'] = list(source)
json.dump(collection, sink, **dump_kwds)
sink.write("\n")
sys.exit(0)
except Exception:
logger.exception("Failed. Exception caught")
sys.exit(1)
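# Illustrative invocation: pipe a feature sequence into collect to build a
# single FeatureCollection, e.g.
#   $ fio cat input.shp | fio collect --indent 2 > collection.json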
# Distribute command
@cli.command(short_help="Distribute features from a collection")
@click.option('--x-json-seq-rs/--x-json-seq-no-rs', default=False,
help="Use RS as text separator instead of LF. "
"Experimental (default: no).")
@click.pass_context
def distrib(ctx, x_json_seq_rs):
"""Print the features of GeoJSON objects read from stdin.
"""
verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
logger = logging.getLogger('fio')
stdin = click.get_text_stream('stdin')
stdout = click.get_text_stream('stdout')
try:
source = obj_gen(stdin)
for i, obj in enumerate(source):
obj_id = obj.get('id', 'collection:' + str(i))
features = obj.get('features') or [obj]
for j, feat in enumerate(features):
if obj.get('type') == 'FeatureCollection':
feat['parent'] = obj_id
feat_id = feat.get('id', 'feature:' + str(i))
feat['id'] = feat_id
stdout.write(json.dumps(feat))
stdout.write('\n')
sys.exit(0)
except Exception:
logger.exception("Failed. Exception caught")
sys.exit(1)
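# Illustrative invocation: distrib is roughly the inverse of collect, e.g.
#   $ fio distrib < collection.json
# writes each feature of the incoming GeoJSON object on its own line.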
# Dump command
@cli.command(short_help="Dump a dataset to GeoJSON.")
@click.argument('input', type=click.Path(), required=True)
@click.option('--encoding', help="Specify encoding of the input file.")
# Coordinate precision option.
@click.option('--precision', type=int, default=-1,
help="Decimal precision of coordinates.")
@click.option('--indent', default=None, type=int,
help="Indentation level for pretty printed output.")
@click.option('--compact/--no-compact', default=False,
help="Use compact separators (',', ':').")
@click.option('--record-buffered/--no-record-buffered', default=False,
help="Economical buffering of writes at record, not collection "
"(default), level.")
@click.option('--ignore-errors/--no-ignore-errors', default=False,
help="log errors but do not stop serialization.")
@click.option('--with-ld-context/--without-ld-context', default=False,
help="add a JSON-LD context to JSON output.")
@click.option('--add-ld-context-item', multiple=True,
help="map a term to a URI and add it to the output's JSON LD context.")
@click.option('--x-json-seq/--x-json-obj', default=False,
help="Write a LF-delimited JSON sequence (default is object). "
"Experimental.")
# Use ASCII RS control code to signal a sequence item (False is default).
# See http://tools.ietf.org/html/draft-ietf-json-text-sequence-05.
# Experimental.
@click.option('--x-json-seq-rs/--x-json-seq-no-rs', default=True,
help="Use RS as text separator. Experimental.")
@click.pass_context
def dump(ctx, input, encoding, precision, indent, compact, record_buffered,
ignore_errors, with_ld_context, add_ld_context_item,
x_json_seq, x_json_seq_rs):
"""Dump a dataset either as a GeoJSON feature collection (the default)
or a sequence of GeoJSON features."""
verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
logger = logging.getLogger('fio')
sink = click.get_text_stream('stdout')
dump_kwds = {'sort_keys': True}
if indent:
dump_kwds['indent'] = indent
if compact:
dump_kwds['separators'] = (',', ':')
item_sep = compact and ',' or ', '
open_kwds = {}
if encoding:
open_kwds['encoding'] = encoding
def transformer(crs, feat):
tg = partial(transform_geom, crs, 'EPSG:4326',
antimeridian_cutting=True, precision=precision)
feat['geometry'] = tg(feat['geometry'])
return feat
try:
with fiona.drivers(CPL_DEBUG=verbosity>2):
with fiona.open(input, **open_kwds) as source:
meta = source.meta
meta['fields'] = dict(source.schema['properties'].items())
if x_json_seq:
for feat in source:
feat = transformer(source.crs, feat)
if x_json_seq_rs:
sink.write(u'\u001e')
json.dump(feat, sink, **dump_kwds)
sink.write("\n")
elif record_buffered:
# Buffer GeoJSON data at the feature level for smaller
# memory footprint.
indented = bool(indent)
rec_indent = "\n" + " " * (2 * (indent or 0))
collection = {
'type': 'FeatureCollection',
'fiona:schema': meta['schema'],
'fiona:crs': meta['crs'],
'features': [] }
if with_ld_context:
collection['@context'] = make_ld_context(
add_ld_context_item)
head, tail = json.dumps(collection, **dump_kwds).split('[]')
sink.write(head)
sink.write("[")
itr = iter(source)
# Try the first record.
try:
i, first = 0, next(itr)
                        first = transformer(source.crs, first)
if with_ld_context:
first = id_record(first)
if indented:
sink.write(rec_indent)
sink.write(
json.dumps(first, **dump_kwds
).replace("\n", rec_indent))
except StopIteration:
pass
except Exception as exc:
# Ignoring errors is *not* the default.
if ignore_errors:
logger.error(
"failed to serialize file record %d (%s), "
"continuing",
i, exc)
else:
# Log error and close up the GeoJSON, leaving it
# more or less valid no matter what happens above.
                            logger.critical(
                                "failed to serialize file record %d (%s), "
                                "quitting",
i, exc)
sink.write("]")
sink.write(tail)
if indented:
sink.write("\n")
raise
# Because trailing commas aren't valid in JSON arrays
# we'll write the item separator before each of the
# remaining features.
for i, rec in enumerate(itr, 1):
                        rec = transformer(source.crs, rec)
try:
if with_ld_context:
rec = id_record(rec)
if indented:
sink.write(rec_indent)
sink.write(item_sep)
sink.write(
json.dumps(rec, **dump_kwds
).replace("\n", rec_indent))
except Exception as exc:
if ignore_errors:
logger.error(
"failed to serialize file record %d (%s), "
"continuing",
i, exc)
else:
                                logger.critical(
                                    "failed to serialize file record %d (%s), "
                                    "quitting",
i, exc)
sink.write("]")
sink.write(tail)
if indented:
sink.write("\n")
raise
# Close up the GeoJSON after writing all features.
sink.write("]")
sink.write(tail)
if indented:
sink.write("\n")
else:
# Buffer GeoJSON data at the collection level. The default.
collection = {
'type': 'FeatureCollection',
'fiona:schema': meta['schema'],
'fiona:crs': meta['crs']}
if with_ld_context:
collection['@context'] = make_ld_context(
add_ld_context_item)
collection['features'] = [
                            id_record(transformer(source.crs, rec)) for rec in source]
else:
collection['features'] = [transformer(source.crs, rec) for rec in source]
json.dump(collection, sink, **dump_kwds)
sys.exit(0)
except Exception:
logger.exception("Failed. Exception caught")
sys.exit(1)
| bsd-3-clause |
modelblocks/modelblocks-release | resource-incrsem/scripts/mlpdecpars2nmodel.py | 1 | 23877 | import sys, configparser, torch, re, os, time
from collections import Counter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from scipy.sparse import csr_matrix
import pdb
np.set_printoptions(threshold=sys.maxsize)
BASEKVOCABSIZE = 0
ANTEKVOCABSIZE = 0
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def ensure_binary(data):
'''confirms two labels are present. if not, adds 1 additional training example with label flipped'''
labels = set()
for line in data:
try:
_, _, _, _, _, _, _, _, label = line.split(" ")
except:
eprint("WARNING: mlpdecpars spec not observed: {}".format(line))
continue
labels.add(label)
if len(labels) == 1:
eprint("only one label found, adding fake training example...")
to_flip = data[0]
if to_flip[-1] == "1":
new_example = to_flip[:-1]+"0"
elif to_flip[-1] == "0":
new_example = to_flip[:-1]+"1"
        else:
            eprint("WARNING: data format not supported!")
            return data
eprint("adding: {}".format(new_example))
data.insert(0,new_example) #prepend new training example to data
return data
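# Sketch of ensure_binary's effect on a single-label input (hypothetical line in
# the 9-field format parsed by prepare_data below):
#   data = ["CAT1 CAT2 [Top] [Bot] 3 9 1 x 1"]
#   data = ensure_binary(data)   # prepends a copy of the line with the label flipped to 0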
def prepare_data():
data = [line.strip() for line in sys.stdin]
data = ensure_binary(data)
catBases, catAntes, hvBases, hvAntes, hvBaseFirsts, hvAnteFirsts, wordDists, sqWordDists, corefOns, labels = ([] for _ in range(10))
#depth, catBase, hvBase, hvFiller, fDecs, hvBFirst, hvFFirst = ([] for _ in range(8))
for line in data:
try:
catBase, catAnte, hvBase, hvAnte, wordDist, sqWordDist, corefOn, _, label = line.split(" ")
except:
eprint("unspec line: {}".format(line))
continue
#raise Exception("out of spec line in input")
#d, cb, hvb, hvf, fd = line.split(" ")
#depth.append(int(d))
#catBase.append(cb)
#hvBase.append(hvb)
#hvFiller.append(hvf)
#fDecs.append(fd)
catBases.append(catBase)
catAntes.append(catAnte)
hvBases.append(hvBase)
hvAntes.append(hvAnte)
wordDists.append(int(wordDist))
sqWordDists.append(int(sqWordDist))
corefOns.append(int(corefOn))
labels.append(int(label))
eprint("Linesplit complete")
# Extract first KVec from sparse HVec
for hvec in hvBases:
match = re.findall(r"^\[(.*?)\]", hvec)
hvBaseFirsts.append(match[0].split(","))
eprint("hvBaseFirsts ready")
global BASEKVOCABSIZE
BASEKVOCABSIZE = len(hvBaseFirsts)
for hvec in hvAntes:
match = re.findall(r"^\[(.*?)\]", hvec)
hvAnteFirsts.append(match[0].split(","))
eprint("hvAnteFirsts ready")
global ANTEKVOCABSIZE
ANTEKVOCABSIZE = len(hvAnteFirsts)
# Mapping from category & HVec to index
flat_hvB = [hvec for sublist in hvBaseFirsts for hvec in sublist if hvec not in ["", "Bot", "Top"]]
flat_hvA = [hvec for sublist in hvAnteFirsts for hvec in sublist if hvec not in ["", "Bot", "Top"]]
allCats = set(catBases).union(set(catAntes))
cat_to_ix = {cat: i for i, cat in enumerate(sorted(set(allCats)))}
#fdecs_to_ix = {fdecs: i for i, fdecs in enumerate(sorted(set(fDecs)))}
hvec_to_ix = {hvec: i for i, hvec in enumerate(sorted(set(flat_hvB + flat_hvA)))}
cat_base_ixs = [cat_to_ix[cat] for cat in catBases]
cat_ante_ixs = [cat_to_ix[cat] for cat in catAntes]
#fdecs_ix = [fdecs_to_ix[fdecs] for fdecs in fDecs]
hvb_row, hvb_col, hvb_top, hva_row, hva_col, hva_top = ([] for _ in range(6))
# KVec index sparse matrix and "Top" KVec counts
for i, sublist in enumerate(hvBaseFirsts):
top_count = 0
for hvec in sublist:
if hvec == "Top":
top_count += 1
elif hvec == "" or hvec == "Bot":
continue
else:
hvb_row.append(i)
hvb_col.append(hvec_to_ix[hvec])
hvb_top.append([top_count])
hvb_mat = csr_matrix((np.ones(len(hvb_row), dtype=np.int32), (hvb_row, hvb_col)),
shape=(len(hvBaseFirsts), len(hvec_to_ix)))
eprint("hvb_mat ready")
for i, sublist in enumerate(hvAnteFirsts):
top_count = 0
for hvec in sublist:
if hvec == "Top":
top_count += 1
elif hvec == "" or hvec == "Bot":
continue
else:
hva_row.append(i)
hva_col.append(hvec_to_ix[hvec])
hva_top.append([top_count])
hva_mat = csr_matrix((np.ones(len(hva_row), dtype=np.int32), (hva_row, hva_col)),
shape=(len(hvAnteFirsts), len(hvec_to_ix)))
eprint("hva_mat ready")
eprint("Number of input KVecs: {}".format(len(hvec_to_ix)))
#eprint("Number of output F categories: {}".format(len(fdecs_to_ix)))
return cat_to_ix, cat_base_ixs, cat_ante_ixs, hvb_mat, hva_mat, hvec_to_ix, hvb_top, hva_top, wordDists, sqWordDists, corefOns, labels
#return depth, cat_b_ix, hvb_mat, hvf_mat, cat_to_ix, fdecs_ix, fdecs_to_ix, hvec_to_ix, hvb_top, hvf_top
def prepare_data_dev(dev_decpars_file, cat_to_ix, hvec_to_ix):
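    """Read dev decpars lines from dev_decpars_file and index them with the
    category and hvec mappings learned on the training data; lines whose
    categories were unseen in training are skipped."""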
with open(dev_decpars_file, "r") as f:
data = f.readlines()
data = [line.strip() for line in data]
catBases, catAntes, hvBases, hvAntes, hvBaseFirsts, hvAnteFirsts, wordDists, sqWordDists, corefOns, labels = ([] for _ in range(10))
eprint("finished reading dev data. beginning processing...")
for line in data:
catBase, catAnte, hvBase, hvAnte, wordDist, sqWordDist, corefOn, _, label = line.split(" ")
if catBase not in cat_to_ix or catAnte not in cat_to_ix:
continue
catBases.append(catBase)
catAntes.append(catAnte)
hvBases.append(hvBase)
hvAntes.append(hvAnte)
wordDists.append(int(wordDist))
sqWordDists.append(int(sqWordDist))
corefOns.append(int(corefOn))
labels.append(int(label))
for kvec in hvBases:
match = re.findall(r"^\[(.*?)\]", kvec)
hvBaseFirsts.append(match[0].split(","))
for kvec in hvAntes:
match = re.findall(r"^\[(.*?)\]", kvec)
hvAnteFirsts.append(match[0].split(","))
cat_b_ix = [cat_to_ix[cat] for cat in catBases]
cat_a_ix = [cat_to_ix[cat] for cat in catAntes]
hvb_row, hvb_col, hva_row, hva_col, hvb_top, hva_top = ([] for _ in range(6))
# KVec indices and "Top" KVec counts
for i, sublist in enumerate(hvBaseFirsts):
top_count = 0
for hvec in sublist:
if hvec == "Top":
top_count += 1
elif hvec == "" or hvec == "Bot" or hvec not in hvec_to_ix:
continue
else:
hvb_row.append(i)
hvb_col.append(hvec_to_ix[hvec])
hvb_top.append([top_count])
hvb_mat = csr_matrix((np.ones(len(hvb_row), dtype=np.int32), (hvb_row, hvb_col)),
shape=(len(hvBaseFirsts), len(hvec_to_ix)))
for i, sublist in enumerate(hvAnteFirsts):
top_count = 0
for hvec in sublist:
if hvec == "Top":
top_count += 1
elif hvec == "" or hvec == "Bot" or hvec not in hvec_to_ix:
continue
else:
hva_row.append(i)
hva_col.append(hvec_to_ix[hvec])
hva_top.append([top_count])
hva_mat = csr_matrix((np.ones(len(hva_row), dtype=np.int32), (hva_row, hva_col)),
shape=(len(hvAnteFirsts), len(hvec_to_ix)))
return cat_b_ix, cat_a_ix, hvb_mat, hva_mat, hvb_top, hva_top, wordDists, sqWordDists, corefOns, labels
class NModel(nn.Module):
def __init__(self, cat_vocab_size, hvec_vocab_size, syn_size, sem_size, hidden_dim, output_dim, dropout_prob):
super(NModel, self).__init__()
self.hvec_vocab_size = hvec_vocab_size
self.sem_size = sem_size
self.cat_embeds = nn.Embedding(cat_vocab_size, syn_size)
self.hvec_embeds = nn.Embedding(hvec_vocab_size, sem_size)
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.dropout_prob = dropout_prob
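        # fc1 input size: base + antecedent category embeddings (2*syn_size),
        # base + antecedent hvec embeddings (2*sem_size), and 3 scalar features
        # (word distance, squared word distance, coref-on flag); see forward().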
self.fc1 = nn.Linear(2*syn_size+2*sem_size+3, self.hidden_dim, bias=True)
self.dropout = nn.Dropout(self.dropout_prob)
self.relu = F.relu
self.fc2 = nn.Linear(self.hidden_dim, self.output_dim, bias=True)
def forward(self, cat_base_ixs, cat_ante_ixs, hvb_mat, hva_mat, hvb_top, hva_top, worddists, sqworddists, corefons, use_gpu, ablate_sem):
#def forward(self, d_onehot, cat_b_ix, hvb_mat, hvf_mat, hvb_top, hvf_top, use_gpu, ablate_sem):
cat_base_embed = self.cat_embeds(cat_base_ixs)
cat_ante_embed = self.cat_embeds(cat_ante_ixs)
hvb_top = torch.FloatTensor(hvb_top)
hva_top = torch.FloatTensor(hva_top)
if use_gpu > 0:
cat_base_embed = cat_base_embed.to("cuda")
cat_ante_embed = cat_ante_embed.to("cuda")
hvb_top = hvb_top.to("cuda")
hva_top = hva_top.to("cuda")
if ablate_sem:
hvb_embed = torch.zeros([hvb_top.shape[0], self.sem_size], dtype=torch.float) + hvb_top
            hva_embed = torch.zeros([hva_top.shape[0], self.sem_size], dtype=torch.float) + hva_top
else:
hvb_mat = hvb_mat.tocoo()
hvb_mat = torch.sparse.FloatTensor(torch.LongTensor([hvb_mat.row.tolist(), hvb_mat.col.tolist()]),
torch.FloatTensor(hvb_mat.data.astype(np.float32)),
torch.Size(hvb_mat.shape))
hva_mat = hva_mat.tocoo()
hva_mat = torch.sparse.FloatTensor(torch.LongTensor([hva_mat.row.tolist(), hva_mat.col.tolist()]),
torch.FloatTensor(hva_mat.data.astype(np.float32)),
torch.Size(hva_mat.shape))
if use_gpu > 0:
hvb_mat = hvb_mat.to("cuda")
hva_mat = hva_mat.to("cuda")
hvb_embed = torch.sparse.mm(hvb_mat, self.hvec_embeds.weight) + hvb_top
hva_embed = torch.sparse.mm(hva_mat, self.hvec_embeds.weight) + hva_top
if use_gpu > 0:
hvb_embed = hvb_embed.to("cuda")
hva_embed = hva_embed.to("cuda")
x = torch.cat((cat_base_embed, cat_ante_embed, hvb_embed, hva_embed, worddists.unsqueeze(dim=1), sqworddists.unsqueeze(dim=1), corefons.unsqueeze(dim=1)), 1)
np.set_printoptions(threshold=sys.maxsize)
torch.set_printoptions(threshold=sys.maxsize)
#eprint(x)
#eprint(hvb_mat[0,:])
x = self.fc1(x)
x = self.dropout(x)
x = self.relu(x)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def print_examples(cat_to_ix, cat_base_ixs, cat_ante_ixs, hvb_mat, hva_mat, hvec_to_ix, hvb_top, hva_top, wordDists, sqWordDists, corefOns, labels):
'''WARNING: don't try to do this for a big dataset - it will run out of memory trying to change sparse to full matrix'''
ix_to_cat = {v: k for k, v in cat_to_ix.items()}
ix_to_hvec = {v: k for k, v in hvec_to_ix.items()}
ex_idxs = range(len(cat_base_ixs))
eprint("example data:")
for ex_idx in ex_idxs:
eprint("base_ix: {} base_cat: {} ante_ix: {} ante_cat: {} worddist: {} sqworddist: {} corefon: {} label: {}".format(cat_base_ixs[ex_idx],ix_to_cat[cat_base_ixs[ex_idx]], cat_ante_ixs[ex_idx], ix_to_cat[cat_ante_ixs[ex_idx]], wordDists[ex_idx], sqWordDists[ex_idx], corefOns[ex_idx], labels[ex_idx]))
eprint(" base hvecs: {}".format(",".join([ ix_to_hvec[i] for i,val in enumerate(hvb_mat.toarray()[ex_idx]) if val > 0 ])))
eprint(" antecedent hvecs: {}".format(",".join([ ix_to_hvec[i] for i,val in enumerate(hva_mat.toarray()[ex_idx]) if val > 0 ])))
#eprint("hvec b: {} hvec a: {}".format([ix_to_hvec[x] for x in hvb_mat[ex_idx] if x != 0], [ix_to_hvec[x] for x in hva_mat[ex_idx] if x != 0]))
def train(use_dev, dev_decpars_file, use_gpu, syn_size, sem_size, hidden_dim, dropout_prob,
num_epochs, batch_size, learning_rate, weight_decay, l2_reg,
ablate_sem, useClassFreqWeighting):
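    """Train the coreference NModel on mlpdecpars lines read from stdin,
    optionally scoring a held-out dev file after each epoch, and return the
    trained model together with the category and hvec index mappings."""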
#depth, cat_b_ix, hvb_mat, hvf_mat, cat_to_ix, fdecs_ix, fdecs_to_ix, hvec_to_ix, hvb_top, hvf_top = prepare_data()
cat_to_ix, cat_base_ixs, cat_ante_ixs, hvb_mat, hva_mat, hvec_to_ix, hvb_top, hva_top, wordDists, sqWordDists, corefOns, labels = prepare_data()
#cat_to_ix, cat_base_ixs, cat_ante_ixs, hvb_mat, hva_mat, hvec_to_ix, hvb_top, hva_top, wordDists, sqWordDists, corefOns, labels = prepare_data()
#print_examples( cat_to_ix, cat_base_ixs, cat_ante_ixs, hvb_mat, hva_mat, hvec_to_ix, hvb_top, hva_top, wordDists, sqWordDists, corefOns, labels)
#depth = F.one_hot(torch.LongTensor(depth), 7).float()
#cat_b_ix = torch.LongTensor(cat_b_ix)
#target = torch.LongTensor(fdecs_ix)
#model = FModel(len(cat_to_ix), len(hvec_to_ix), syn_size, sem_size, hidden_dim, len(fdecs_to_ix))
cat_base_ixs = torch.LongTensor(cat_base_ixs)
cat_ante_ixs = torch.LongTensor(cat_ante_ixs)
#hvBases = torch.FloatTensor(hvBases)
#hvAntes = torch.FloatTensor(hvAntes)
wordDists = torch.LongTensor(wordDists)
sqWordDists = torch.LongTensor(sqWordDists)
corefOns = torch.LongTensor(corefOns)
target = torch.LongTensor(labels)
outputdim = len(set(target.tolist()))
assert outputdim == 2
model = NModel(len(cat_to_ix), len(hvec_to_ix), syn_size, sem_size, hidden_dim, outputdim, dropout_prob)
if use_gpu > 0:
#depth = depth.to("cuda")
cat_base_ixs = cat_base_ixs.to("cuda")
cat_ante_ixs = cat_ante_ixs.to("cuda")
#hvBases = hvBases.to("cuda")
#hvAntes = hvAntes.to("cuda")
wordDists = wordDists.to("cuda")
sqWordDists = sqWordDists.to("cuda")
corefOns = corefOns.to("cuda")
target = target.to("cuda")
model = model.cuda()
#cat_base_ixs = cat_base_ixs.to("cuda")
#target = target.to("cuda")
#model = model.cuda()
if use_dev > 0:
#dev_depth, dev_cat_b_mat, dev_hvb_mat, dev_hvf_mat, dev_fdecs_ix, dev_hvb_top, dev_hvf_top = prepare_data_dev(
# dev_decpars_file, cat_to_ix, fdecs_to_ix, hvec_to_ix)
dev_cat_b_ix, dev_cat_a_ix, dev_hvb_mat, dev_hva_mat, dev_hvb_top, dev_hva_top, dev_worddists, dev_sqworddists, dev_corefons, dev_labels = prepare_data_dev(dev_decpars_file, cat_to_ix, hvec_to_ix)
#dev_depth = F.one_hot(torch.LongTensor(dev_depth), 7).float()
        dev_cat_b_ix = torch.LongTensor(dev_cat_b_ix)
dev_cat_a_ix = torch.LongTensor(dev_cat_a_ix)
dev_worddists = torch.LongTensor(dev_worddists)
dev_sqworddists = torch.LongTensor(dev_sqworddists)
dev_corefons = torch.LongTensor(dev_corefons)
dev_target = torch.LongTensor(dev_labels)
if use_gpu > 0:
dev_cat_b_ix = dev_cat_b_ix.to("cuda")
dev_cat_a_ix = dev_cat_a_ix.to("cuda")
dev_worddists = dev_worddists.to("cuda")
dev_sqworddists = dev_sqworddists.to("cuda")
dev_corefons = dev_corefons.to("cuda")
dev_target = dev_target.to("cuda")
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
#TODO implement useClassFreqWeighting
criterion = nn.NLLLoss()
# training loop
eprint("Start NModel training...")
epoch = 0
while True:
c0 = time.time()
model.train()
epoch += 1
permutation = torch.randperm(len(target))
total_train_correct = 0
total_train_loss = 0
total_dev_loss = 0
for i in range(0, len(target), batch_size):
indices = permutation[i:i + batch_size]
#indices = range(i,i+batch_size)
#eprint("using indices: {} to {}".format(i,i+batch_size))
batch_catbase, batch_catante, batch_worddist, batch_sqworddist, batch_corefon, batch_target = cat_base_ixs[indices], cat_ante_ixs[indices], wordDists[indices], sqWordDists[indices], corefOns[indices], target[indices]
#batch_d, batch_c, batch_target = depth[indices], cat_b_ix[indices], target[indices]
batch_hvb_mat, batch_hva_mat = hvb_mat[np.array(indices), :], hva_mat[np.array(indices), :]
batch_hvb_top, batch_hva_top = [hvb_top[i] for i in indices], [hva_top[i] for i in indices]
if use_gpu > 0:
l2_loss = torch.cuda.FloatTensor([0])
else:
l2_loss = torch.FloatTensor([0])
for param in model.parameters():
if torch.numel(param) == 0:
continue
l2_loss += torch.mean(param.pow(2))
#output = model(batch_d, batch_c, batch_hvb_mat, batch_hvf_mat, batch_hvb_top, batch_hvf_top, use_gpu,
output = model(batch_catbase, batch_catante, batch_hvb_mat,
batch_hva_mat, batch_hvb_top, batch_hva_top,
batch_worddist.float(), batch_sqworddist.float(),
batch_corefon.float(), use_gpu, ablate_sem)
_, ndec = torch.max(output.data, 1)
train_correct = (ndec == batch_target).sum().item()
total_train_correct += train_correct
nll_loss = criterion(output, batch_target)
loss = nll_loss + l2_reg * l2_loss
total_train_loss += loss.item()
loss.backward()
optimizer.step()
optimizer.zero_grad()
if use_dev > 0:
with torch.no_grad():
#dev_pred = model(dev_depth, dev_cat_b_ix, dev_hvb_mat, dev_hvf_mat, dev_hvb_top, dev_hvf_top, use_gpu,
dev_pred = model(dev_cat_b_ix, dev_cat_a_ix, dev_hvb_mat,
dev_hva_mat, dev_hvb_top, dev_hva_top,
dev_worddists.float(), dev_sqworddists.float(),
dev_corefons.float(), use_gpu, ablate_sem)
_, dev_ndec = torch.max(dev_pred.data, 1)
dev_correct = (dev_ndec == dev_target).sum().item()
dev_loss = criterion(dev_pred, dev_target)
total_dev_loss += dev_loss.item()
dev_acc = 100 * (dev_correct / len(dev_target))
else:
dev_acc = 0
eprint("Epoch {:04d} | AvgTrainLoss {:.4f} | TrainAcc {:.4f} | DevLoss {:.4f} | DevAcc {:.4f} | Time {:.4f}".
format(epoch, total_train_loss / ((len(target) // batch_size) + 1), 100 * (total_train_correct / len(target)),
total_dev_loss, dev_acc, time.time() - c0))
if epoch == num_epochs:
break
#return model, cat_to_ix, fdecs_to_ix, hvec_to_ix
#print batch cat and hvembed and dist feats
return model, cat_to_ix, hvec_to_ix
def main(config):
n_config = config["NModel"]
model, cat_to_ix, hvec_to_ix = train(n_config.getint("Dev"),
n_config.get("DevFile"),
n_config.getint("GPU"),
n_config.getint("SynSize"),
n_config.getint("SemSize"),
n_config.getint("HiddenSize"),
n_config.getfloat("DropoutProb"),
n_config.getint("NEpochs"),
n_config.getint("BatchSize"),
n_config.getfloat("LearningRate"),
n_config.getfloat("WeightDecay"),
n_config.getfloat("L2Reg"),
n_config.getboolean("AblateSem"),
n_config.getboolean("UseClassFreqWeighting")
)
if n_config.getint("GPU") >= 0:
cat_embeds = list(model.parameters())[0].data.cpu().numpy()
hvec_embeds = list(model.parameters())[1].data.cpu().numpy()
first_weights = list(model.parameters())[2].data.cpu().numpy()
first_biases = list(model.parameters())[3].data.cpu().numpy()
second_weights = list(model.parameters())[4].data.cpu().numpy()
second_biases = list(model.parameters())[5].data.cpu().numpy()
else:
cat_embeds = list(model.parameters())[0].data.numpy()
hvec_embeds = list(model.parameters())[1].data.numpy()
first_weights = list(model.parameters())[2].data.numpy()
first_biases = list(model.parameters())[3].data.numpy()
second_weights = list(model.parameters())[4].data.numpy()
second_biases = list(model.parameters())[5].data.numpy()
eprint(first_weights.shape, second_weights.shape)
print("N F " + ",".join(map(str, first_weights.flatten('F').tolist())))
print("N f " + ",".join(map(str, first_biases.flatten('F').tolist())))
print("N S " + ",".join(map(str, second_weights.flatten('F').tolist())))
print("N s " + ",".join(map(str, second_biases.flatten('F').tolist())))
for cat, ix in sorted(cat_to_ix.items()):
print("C " + str(cat) + " " + ",".join(map(str, cat_embeds[ix])))
if not n_config.getboolean("AblateSem"):
for hvec, ix in sorted(hvec_to_ix.items()):
print("K " + str(hvec) + " " + ",".join(map(str, hvec_embeds[ix])))
#for fdec, ix in sorted(fdecs_to_ix.items()):
# print("f " + str(ix) + " " + str(fdec))
'''
#run an arbitrary forward pass on trained model
model.eval()
#data = torch.randn(1, 3, 24, 24) # Load your data here, this is just dummy data
#data=np.array([[10,45],[11,12],[4,1]])
data = np.array([[0,0]])
rows = data[:,0]
cols = data[:,1]
#hva_mat = csr_matrix((np.ones(len(hva_row), dtype=np.int32), (hva_row, hva_col)), shape=(len(hvAnteFirsts), len(hvec_to_ix)))
eprint("length of hvec_to_ix: "+ str(len(hvec_to_ix)))
#eprint("basekvocabsize: {}".format(BASEKVOCABSIZE))
#eprint("antekvocabsize: {}".format(ANTEKVOCABSIZE))
eprint(data)
eprint(rows)
eprint(cols)
#emptycsrbase = csr_matrix((np.zeros(len(rows),dtype=np.int32), (rows,cols)), shape=(BASEKVOCABSIZE, len(hvec_to_ix))) #batch x kvocab
#emptycsrante = csr_matrix((np.zeros(len(rows),dtype=np.int32), (rows,cols)), shape=(ANTEKVOCABSIZE, len(hvec_to_ix))) #batch x kvocab
emptycsr = csr_matrix((np.zeros(len(rows),dtype=np.int32), (rows,cols)), shape=(1, len(hvec_to_ix))) #batch x kvocab
#output = model([cat_to_ix["T"]],[cat_to_ix["T"]], emptycsrbase, emptycsrante, [0], [0], [0], [0], [0], False,False)
zero = torch.LongTensor([0])
zerofloat = torch.FloatTensor([0])
output = model(torch.LongTensor([cat_to_ix["V-aN"]]),torch.LongTensor([cat_to_ix["T"]]), emptycsr, emptycsr, zerofloat, zerofloat, zerofloat, zerofloat, zerofloat, -1, False)
#output = model(torch.LongTensor([cat_to_ix["T"]]),torch.LongTensor([cat_to_ix["T"]]), emptycsr, emptycsr, zerofloat, zerofloat, zerofloat, zerofloat, zerofloat, -1, False)
#def forward(self, cat_base_ixs, cat_ante_ixs, hvb_mat, hva_mat, hvb_top, hva_top, worddists, sqworddists, corefons, use_gpu, ablate_sem):
#output = model(data)
prediction = torch.argmax(output)
eprint("V-aN,T,bot,bot,0,0,0 output on trained model: ")
#eprint("T,T,bot,bot,0,0,0 output on trained model: ")
eprint(output)
eprint("prediction: ")
eprint(prediction)
'''
if __name__ == "__main__":
config = configparser.ConfigParser(allow_no_value=True)
config.read(sys.argv[1])
for section in config:
eprint(section, dict(config[section]))
main(config)
| gpl-3.0 |
glennq/scikit-learn | sklearn/linear_model/passive_aggressive.py | 28 | 11542 | # Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
    fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
.. versionadded:: 0.17
parameter *class_weight* to automatically weight samples.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
.. versionadded:: 0.19
parameter *average* to use weights averaging in SGD
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="hinge", n_jobs=1, random_state=None,
warm_start=False, class_weight=None, average=False):
super(PassiveAggressiveClassifier, self).__init__(
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
class_weight=class_weight,
average=average,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
        Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
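# Minimal usage sketch (illustrative; X_train, y_train, X_test are placeholder arrays):
#   clf = PassiveAggressiveClassifier(C=1.0, n_iter=5)
#   clf.fit(X_train, y_train)
#   y_pred = clf.predict(X_test)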
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
.. versionadded:: 0.19
parameter *average* to use weights averaging in SGD
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False,
average=False):
super(PassiveAggressiveRegressor, self).__init__(
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start,
average=average)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
| bsd-3-clause |
previtus/MGR-Project-Code | Settings/set4a_min-edge-datasets-tests/set4a_MinEdgeSize_mix_len10_kfold_d5.py | 1 | 1407 | def Setup(Settings, DefaultModel):
# set4a_min-edge-datasets-tests/set4a_MinEdgeSize_mix_len10_kfold_d5.py
Settings["experiment_name"] = "set4a_MinEdgeSize_mix_len10_kfold_d5"
Settings["graph_histories"] = [] # ['all','together',[],[1,0],[0,0,0],[]]
n = 0
#d1 5556x_markable_640x640 SegmentsData_marked_R100_4Tables
#d2 5556x_markable_640x640_2x_expanded SegmentsData_marked_R100_4Tables_expanded.dump
#d3 5556x_minlen30_640px SegmentsData_marked_R100_4Tables.dump
#d4 5556x_minlen30_640px_2x_expanded SegmentsData_marked_R100_4Tables_expanded.dump
#d5 5556x_minlen10_640px SegmentsData_marked_R100_4Tables.dump
#d6 5556x_minlen20_640px SegmentsData_marked_R100_4Tables.dump
Settings["models"][n]["dataset_name"] = "5556x_minlen10_640px"
Settings["models"][n]["dump_file_override"] = 'SegmentsData_marked_R100_4Tables.dump'
Settings["models"][n]["pixels"] = 640
Settings["models"][n]["model_type"] = 'img_osm_mix' # osm_only img_only img_osm_mix
Settings["models"][n]["unique_id"] = 'mix_with_min10'
Settings["models"][n]["top_repeat_FC_block"] = 2
Settings["models"][n]["epochs"] = 500
Settings["models"][n]["k_fold_crossvalidation"] = True
Settings["models"][n]["crossvalidation_k"] = 10
Settings["graph_histories"] = []
return Settings
| mit |
cainiaocome/scikit-learn | sklearn/cluster/bicluster.py | 210 | 19443 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
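# Equivalent formula: the normalized matrix is D_r^{-1/2} * X * D_c^{-1/2}, where
# D_r and D_c are diagonal matrices of the row and column sums of the nonnegative
# X (zero sums are mapped to zero rather than infinity).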
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
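# Minimal usage sketch (illustrative; X is a placeholder nonnegative data matrix):
#   model = SpectralCoclustering(n_clusters=5, random_state=0)
#   model.fit(X)
#   rows, cols = model.row_labels_, model.column_labels_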
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack([self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters)])
self.columns_ = np.vstack([self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters)])
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
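# centroid[labels] replaces every entry of v by the centroid of its k-means
# cluster, i.e. a piecewise-constant approximation of v with n_clusters levels.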
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
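# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It exercises the
# checkerboard assumption described in the SpectralBiclustering docstring on
# synthetic data; the dataset helper, parameter values and variable names below
# are assumptions chosen only for demonstration.
if __name__ == '__main__':
    from sklearn.datasets import make_checkerboard
    from sklearn.metrics import consensus_score

    # A 300 x 300 matrix with a 4 x 3 checkerboard of biclusters.
    data, rows, columns = make_checkerboard(
        shape=(300, 300), n_clusters=(4, 3), noise=10,
        shuffle=False, random_state=0)
    model = SpectralBiclustering(n_clusters=(4, 3), method='log',
                                 random_state=0)
    model.fit(data)
    # consensus_score compares the recovered biclusters with the ground truth.
    print("consensus score: {:.2f}".format(
        consensus_score(model.biclusters_, (rows, columns))))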
| bsd-3-clause |
sshleifer/object_detection_kitti | lfads/synth_data/generate_itb_data.py | 8 | 8328 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import print_function
import h5py
import numpy as np
import os
import tensorflow as tf
from utils import write_datasets
from synthetic_data_utils import normalize_rates
from synthetic_data_utils import get_train_n_valid_inds, nparray_and_transpose
from synthetic_data_utils import spikify_data, split_list_by_inds
DATA_DIR = "rnn_synth_data_v1.0"
flags = tf.app.flags
flags.DEFINE_string("save_dir", "/tmp/" + DATA_DIR + "/",
"Directory for saving data.")
flags.DEFINE_string("datafile_name", "itb_rnn",
"Name of data file for input case.")
flags.DEFINE_integer("synth_data_seed", 5, "Random seed for RNN generation.")
flags.DEFINE_float("T", 1.0, "Time in seconds to generate.")
flags.DEFINE_integer("C", 800, "Number of conditions")
flags.DEFINE_integer("N", 50, "Number of units for the RNN")
flags.DEFINE_float("train_percentage", 4.0/5.0,
"Percentage of train vs validation trials")
flags.DEFINE_integer("nspikifications", 5,
"Number of spikifications of the same underlying rates.")
flags.DEFINE_float("tau", 0.025, "Time constant of RNN")
flags.DEFINE_float("dt", 0.010, "Time bin")
flags.DEFINE_float("max_firing_rate", 30.0,
"Map 1.0 of RNN to a spikes per second")
flags.DEFINE_float("u_std", 0.25,
"Std dev of input to integration to bound model")
flags.DEFINE_string("checkpoint_path", "SAMPLE_CHECKPOINT",
"""Path to directory with checkpoints of model
trained on integration to bound task. Currently this
is a placeholder which tells the code to grab the
checkpoint that is provided with the code
(in /trained_itb/..). If you have your own checkpoint
you would like to restore, you would point it to
that path.""")
FLAGS = flags.FLAGS
class IntegrationToBoundModel:
def __init__(self, N):
scale = 0.8 / float(N**0.5)
self.N = N
self.Wh_nxn = tf.Variable(tf.random_normal([N, N], stddev=scale))
self.b_1xn = tf.Variable(tf.zeros([1, N]))
self.Bu_1xn = tf.Variable(tf.zeros([1, N]))
self.Wro_nxo = tf.Variable(tf.random_normal([N, 1], stddev=scale))
self.bro_o = tf.Variable(tf.zeros([1]))
def call(self, h_tm1_bxn, u_bx1):
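# Vanilla RNN step: h_t = tanh(h_{t-1} W + b + u_t * B); the scalar readout
# z_t is an affine projection of the hidden state.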
act_t_bxn = tf.matmul(h_tm1_bxn, self.Wh_nxn) + self.b_1xn + u_bx1 * self.Bu_1xn
h_t_bxn = tf.nn.tanh(act_t_bxn)
z_t = tf.nn.xw_plus_b(h_t_bxn, self.Wro_nxo, self.bro_o)
return z_t, h_t_bxn
def get_data_batch(batch_size, T, rng, u_std):
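# Inputs are Gaussian white noise with std u_std; the target at each time step
# is the running sum of the inputs so far, clipped to [-1, 1] (integration to
# a bound).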
u_bxt = rng.randn(batch_size, T) * u_std
running_sum_b = np.zeros([batch_size])
labels_bxt = np.zeros([batch_size, T])
for t in xrange(T):
running_sum_b += u_bxt[:, t]
labels_bxt[:, t] += running_sum_b
labels_bxt = np.clip(labels_bxt, -1, 1)
return u_bxt, labels_bxt
rng = np.random.RandomState(seed=FLAGS.synth_data_seed)
u_rng = np.random.RandomState(seed=FLAGS.synth_data_seed+1)
T = FLAGS.T
C = FLAGS.C
N = FLAGS.N # must be same N as in trained model (provided example is N = 50)
nspikifications = FLAGS.nspikifications
E = nspikifications * C # total number of trials
train_percentage = FLAGS.train_percentage
ntimesteps = int(T / FLAGS.dt)
batch_size = 1 # generate one trial per run of the unrolled network
model = IntegrationToBoundModel(N)
inputs_ph_t = [tf.placeholder(tf.float32,
shape=[None, 1]) for _ in range(ntimesteps)]
state = tf.zeros([batch_size, N])
saver = tf.train.Saver()
P_nxn = rng.randn(N,N) / np.sqrt(N) # random projections
# unroll RNN for T timesteps
outputs_t = []
states_t = []
for inp in inputs_ph_t:
output, state = model.call(state, inp)
outputs_t.append(output)
states_t.append(state)
with tf.Session() as sess:
# restore the latest model ckpt
if FLAGS.checkpoint_path == "SAMPLE_CHECKPOINT":
dir_path = os.path.dirname(os.path.realpath(__file__))
model_checkpoint_path = os.path.join(dir_path, "trained_itb/model-65000")
else:
model_checkpoint_path = FLAGS.checkpoint_path
try:
saver.restore(sess, model_checkpoint_path)
print ('Model restored from', model_checkpoint_path)
except:
assert False, ("No checkpoints to restore from, is the path %s correct?"
%model_checkpoint_path)
# generate data for trials
data_e = []
u_e = []
outs_e = []
for c in range(C):
u_1xt, outs_1xt = get_data_batch(batch_size, ntimesteps, u_rng, FLAGS.u_std)
feed_dict = {}
for t in xrange(ntimesteps):
feed_dict[inputs_ph_t[t]] = np.reshape(u_1xt[:,t], (batch_size,-1))
states_t_bxn, outputs_t_bxn = sess.run([states_t, outputs_t],
feed_dict=feed_dict)
states_nxt = np.transpose(np.squeeze(np.asarray(states_t_bxn)))
outputs_t_bxn = np.squeeze(np.asarray(outputs_t_bxn))
r_sxt = np.dot(P_nxn, states_nxt)
for s in xrange(nspikifications):
data_e.append(r_sxt)
u_e.append(u_1xt)
outs_e.append(outputs_t_bxn)
truth_data_e = normalize_rates(data_e, E, N)
spiking_data_e = spikify_data(truth_data_e, rng, dt=FLAGS.dt,
max_firing_rate=FLAGS.max_firing_rate)
train_inds, valid_inds = get_train_n_valid_inds(E, train_percentage,
nspikifications)
data_train_truth, data_valid_truth = split_list_by_inds(truth_data_e,
train_inds,
valid_inds)
data_train_spiking, data_valid_spiking = split_list_by_inds(spiking_data_e,
train_inds,
valid_inds)
data_train_truth = nparray_and_transpose(data_train_truth)
data_valid_truth = nparray_and_transpose(data_valid_truth)
data_train_spiking = nparray_and_transpose(data_train_spiking)
data_valid_spiking = nparray_and_transpose(data_valid_spiking)
# save down the inputs used to generate this data
train_inputs_u, valid_inputs_u = split_list_by_inds(u_e,
train_inds,
valid_inds)
train_inputs_u = nparray_and_transpose(train_inputs_u)
valid_inputs_u = nparray_and_transpose(valid_inputs_u)
# save down the network outputs (may be useful later)
train_outputs_u, valid_outputs_u = split_list_by_inds(outs_e,
train_inds,
valid_inds)
train_outputs_u = np.array(train_outputs_u)
valid_outputs_u = np.array(valid_outputs_u)
data = { 'train_truth': data_train_truth,
'valid_truth': data_valid_truth,
'train_data' : data_train_spiking,
'valid_data' : data_valid_spiking,
'train_percentage' : train_percentage,
'nspikifications' : nspikifications,
'dt' : FLAGS.dt,
'u_std' : FLAGS.u_std,
'max_firing_rate': FLAGS.max_firing_rate,
'train_inputs_u': train_inputs_u,
'valid_inputs_u': valid_inputs_u,
'train_outputs_u': train_outputs_u,
'valid_outputs_u': valid_outputs_u,
'conversion_factor' : FLAGS.max_firing_rate/(1.0/FLAGS.dt) }
# just one dataset here
datasets = {}
dataset_name = 'dataset_N' + str(N)
datasets[dataset_name] = data
# write out the dataset
write_datasets(FLAGS.save_dir, FLAGS.datafile_name, datasets)
print ('Saved to ', os.path.join(FLAGS.save_dir,
FLAGS.datafile_name + '_' + dataset_name))
| apache-2.0 |
PatrickOReilly/scikit-learn | sklearn/linear_model/setup.py | 144 | 1713 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linear_model', parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension('cd_fast', sources=['cd_fast.c'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]), **blas_info)
config.add_extension('sgd_fast',
sources=['sgd_fast.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
config.add_extension('sag_fast',
sources=['sag_fast.c'],
include_dirs=numpy.get_include())
# add other directories
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
yyjiang/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 141 | 18692 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Accuracies should be non-decreasing as n_candidates increases
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Accuracies should be non-decreasing as n_estimators increases
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether the desired number of neighbors is returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether returned distances are less than `radius`.
# At least one point should be returned when `radius` is set to the
# mean distance from the query point to the other points in the
# database.
# Moreover, this test compares the radius neighbors of LSHForest
# with those of `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)]
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# Each query can return a different number of neighbors, so dists and
# inds are 1D object arrays holding per-query arrays of variable length.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)]
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [1., 0.]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost zero:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point forms an angle of 45 degrees with the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal to the query vector, hence at a distance of
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost collinear with the query but with opposite sign,
# so its cosine 'distance' is very close to the maximum possible value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _fit_X = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether the inserted array is consistent with the fitted data.
# The `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
# size of _fit_X = n_samples + n_samples_partial_fit after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[0] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# The variance and mean of each hash function (projection vector)
# should differ from those of the flattened array of all hash functions.
# If the hash functions were not built randomly (e.g. all seeded with the
# same value), the variances and means of all functions would be equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the case when the number of candidates is 0.
# The user should be warned when the number of candidates is less than
# the requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
glennq/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 346 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
i.e. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
pianomania/scikit-learn | examples/manifold/plot_compare_methods.py | 31 | 4051 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`sphx_glr_auto_examples_manifold_plot_manifold_sphere.py`
Note that the purpose of MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space; unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
pianomania/scikit-learn | examples/feature_stacker.py | 78 | 1911 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
| bsd-3-clause |
theoryno3/scikit-learn | sklearn/linear_model/tests/test_base.py | 119 | 10082 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
| bsd-3-clause |
giorgiop/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 303 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually in the feature space a comparison by
results using two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process: two Student's t distributions with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
yyjiang/scikit-learn | examples/model_selection/plot_precision_recall.py | 248 | 6150 | """
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
It is important to note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
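# A minimal worked example (not part of the original script) of the formulas
# above: with 8 true positives, 2 false positives and 4 false negatives,
# precision is 8 / 10 = 0.8, recall is 8 / 12 ~= 0.67 and F1 is their harmonic
# mean, ~= 0.73. The variable names are illustrative only.
tp, fp, fn = 8.0, 2.0, 4.0
precision_example = tp / (tp + fp)            # 0.8
recall_example = tp / (tp + fn)               # ~0.667
f1_example = (2 * precision_example * recall_example
              / (precision_example + recall_example))   # ~0.727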
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=random_state)
# Run classifier
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute the micro-averaged Precision-Recall curve and its average precision
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
# Plot Precision-Recall curve
plt.clf()
plt.plot(recall[0], precision[0], label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot Precision-Recall curve for each class
plt.clf()
plt.plot(recall["micro"], precision["micro"],
label='micro-average Precision-recall curve (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i in range(n_classes):
plt.plot(recall[i], precision[i],
label='Precision-recall curve of class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
SpectraLogic/samba | third_party/dnspython/tests/zone.py | 50 | 14048 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import filecmp
import os
import unittest
import dns.exception
import dns.rdata
import dns.rdataclass
import dns.rdatatype
import dns.rrset
import dns.zone
example_text = """$TTL 3600
$ORIGIN example.
@ soa foo bar 1 2 3 4 5
@ ns ns1
@ ns ns2
ns1 a 10.0.0.1
ns2 a 10.0.0.2
$TTL 300
$ORIGIN foo.example.
bar mx 0 blaz
"""
example_text_output = """@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
bar.foo 300 IN MX 0 blaz.foo
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.2
"""
something_quite_similar = """@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
bar.foo 300 IN MX 0 blaz.foo
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.3
"""
something_different = """@ 3600 IN SOA fooa bar 1 2 3 4 5
@ 3600 IN NS ns11
@ 3600 IN NS ns21
bar.fooa 300 IN MX 0 blaz.fooa
ns11 3600 IN A 10.0.0.11
ns21 3600 IN A 10.0.0.21
"""
ttl_example_text = """$TTL 1h
$ORIGIN example.
@ soa foo bar 1 2 3 4 5
@ ns ns1
@ ns ns2
ns1 1d1s a 10.0.0.1
ns2 1w1D1h1m1S a 10.0.0.2
"""
no_soa_text = """$TTL 1h
$ORIGIN example.
@ ns ns1
@ ns ns2
ns1 1d1s a 10.0.0.1
ns2 1w1D1h1m1S a 10.0.0.2
"""
no_ns_text = """$TTL 1h
$ORIGIN example.
@ soa foo bar 1 2 3 4 5
"""
include_text = """$INCLUDE "example"
"""
bad_directive_text = """$FOO bar
$ORIGIN example.
@ soa foo bar 1 2 3 4 5
@ ns ns1
@ ns ns2
ns1 1d1s a 10.0.0.1
ns2 1w1D1h1m1S a 10.0.0.2
"""
_keep_output = False
class ZoneTestCase(unittest.TestCase):
def testFromFile1(self):
z = dns.zone.from_file('example', 'example')
ok = False
try:
z.to_file('example1.out', nl='\x0a')
ok = filecmp.cmp('example1.out', 'example1.good')
finally:
if not _keep_output:
os.unlink('example1.out')
self.failUnless(ok)
def testFromFile2(self):
z = dns.zone.from_file('example', 'example', relativize=False)
ok = False
try:
z.to_file('example2.out', relativize=False, nl='\x0a')
ok = filecmp.cmp('example2.out', 'example2.good')
finally:
if not _keep_output:
os.unlink('example2.out')
self.failUnless(ok)
def testFromText(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
f = cStringIO.StringIO()
names = z.nodes.keys()
names.sort()
for n in names:
print >> f, z[n].to_text(n)
self.failUnless(f.getvalue() == example_text_output)
def testTorture1(self):
#
# Read a zone containing all our supported RR types, and
# for each RR in the zone, convert the rdata into wire format
# and then back out, and see if we get equal rdatas.
#
f = cStringIO.StringIO()
o = dns.name.from_text('example.')
z = dns.zone.from_file('example', o)
for (name, node) in z.iteritems():
for rds in node:
for rd in rds:
f.seek(0)
f.truncate()
rd.to_wire(f, origin=o)
wire = f.getvalue()
rd2 = dns.rdata.from_wire(rds.rdclass, rds.rdtype,
wire, 0, len(wire),
origin = o)
self.failUnless(rd == rd2)
def testEqual(self):
z1 = dns.zone.from_text(example_text, 'example.', relativize=True)
z2 = dns.zone.from_text(example_text_output, 'example.',
relativize=True)
self.failUnless(z1 == z2)
def testNotEqual1(self):
z1 = dns.zone.from_text(example_text, 'example.', relativize=True)
z2 = dns.zone.from_text(something_quite_similar, 'example.',
relativize=True)
self.failUnless(z1 != z2)
def testNotEqual2(self):
z1 = dns.zone.from_text(example_text, 'example.', relativize=True)
z2 = dns.zone.from_text(something_different, 'example.',
relativize=True)
self.failUnless(z1 != z2)
def testNotEqual3(self):
z1 = dns.zone.from_text(example_text, 'example.', relativize=True)
z2 = dns.zone.from_text(something_different, 'example2.',
relativize=True)
self.failUnless(z1 != z2)
def testFindRdataset1(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
rds = z.find_rdataset('@', 'soa')
exrds = dns.rdataset.from_text('IN', 'SOA', 300, 'foo bar 1 2 3 4 5')
self.failUnless(rds == exrds)
def testFindRdataset2(self):
def bad():
z = dns.zone.from_text(example_text, 'example.', relativize=True)
rds = z.find_rdataset('@', 'loc')
self.failUnlessRaises(KeyError, bad)
def testFindRRset1(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
rrs = z.find_rrset('@', 'soa')
exrrs = dns.rrset.from_text('@', 300, 'IN', 'SOA', 'foo bar 1 2 3 4 5')
self.failUnless(rrs == exrrs)
def testFindRRset2(self):
def bad():
z = dns.zone.from_text(example_text, 'example.', relativize=True)
rrs = z.find_rrset('@', 'loc')
self.failUnlessRaises(KeyError, bad)
def testGetRdataset1(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
rds = z.get_rdataset('@', 'soa')
exrds = dns.rdataset.from_text('IN', 'SOA', 300, 'foo bar 1 2 3 4 5')
self.failUnless(rds == exrds)
def testGetRdataset2(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
rds = z.get_rdataset('@', 'loc')
self.failUnless(rds == None)
def testGetRRset1(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
rrs = z.get_rrset('@', 'soa')
exrrs = dns.rrset.from_text('@', 300, 'IN', 'SOA', 'foo bar 1 2 3 4 5')
self.failUnless(rrs == exrrs)
def testGetRRset2(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
rrs = z.get_rrset('@', 'loc')
self.failUnless(rrs == None)
def testReplaceRdataset1(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
rdataset = dns.rdataset.from_text('in', 'ns', 300, 'ns3', 'ns4')
z.replace_rdataset('@', rdataset)
rds = z.get_rdataset('@', 'ns')
self.failUnless(rds is rdataset)
def testReplaceRdataset2(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
rdataset = dns.rdataset.from_text('in', 'txt', 300, '"foo"')
z.replace_rdataset('@', rdataset)
rds = z.get_rdataset('@', 'txt')
self.failUnless(rds is rdataset)
def testDeleteRdataset1(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
z.delete_rdataset('@', 'ns')
rds = z.get_rdataset('@', 'ns')
self.failUnless(rds is None)
def testDeleteRdataset2(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
z.delete_rdataset('ns1', 'a')
node = z.get_node('ns1')
self.failUnless(node is None)
def testNodeFindRdataset1(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
node = z['@']
rds = node.find_rdataset(dns.rdataclass.IN, dns.rdatatype.SOA)
exrds = dns.rdataset.from_text('IN', 'SOA', 300, 'foo bar 1 2 3 4 5')
self.failUnless(rds == exrds)
def testNodeFindRdataset2(self):
def bad():
z = dns.zone.from_text(example_text, 'example.', relativize=True)
node = z['@']
rds = node.find_rdataset(dns.rdataclass.IN, dns.rdatatype.LOC)
self.failUnlessRaises(KeyError, bad)
def testNodeGetRdataset1(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
node = z['@']
rds = node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.SOA)
exrds = dns.rdataset.from_text('IN', 'SOA', 300, 'foo bar 1 2 3 4 5')
self.failUnless(rds == exrds)
def testNodeGetRdataset2(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
node = z['@']
rds = node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.LOC)
self.failUnless(rds == None)
def testNodeDeleteRdataset1(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
node = z['@']
rds = node.delete_rdataset(dns.rdataclass.IN, dns.rdatatype.SOA)
rds = node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.SOA)
self.failUnless(rds == None)
def testNodeDeleteRdataset2(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
node = z['@']
rds = node.delete_rdataset(dns.rdataclass.IN, dns.rdatatype.LOC)
rds = node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.LOC)
self.failUnless(rds == None)
def testIterateRdatasets(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
ns = [n for n, r in z.iterate_rdatasets('A')]
ns.sort()
self.failUnless(ns == [dns.name.from_text('ns1', None),
dns.name.from_text('ns2', None)])
def testIterateAllRdatasets(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
ns = [n for n, r in z.iterate_rdatasets()]
ns.sort()
self.failUnless(ns == [dns.name.from_text('@', None),
dns.name.from_text('@', None),
dns.name.from_text('bar.foo', None),
dns.name.from_text('ns1', None),
dns.name.from_text('ns2', None)])
def testIterateRdatas(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
l = list(z.iterate_rdatas('A'))
l.sort()
exl = [(dns.name.from_text('ns1', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.1')),
(dns.name.from_text('ns2', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.2'))]
self.failUnless(l == exl)
def testIterateAllRdatas(self):
z = dns.zone.from_text(example_text, 'example.', relativize=True)
l = list(z.iterate_rdatas())
l.sort()
exl = [(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
'ns1')),
(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
'ns2')),
(dns.name.from_text('@', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA,
'foo bar 1 2 3 4 5')),
(dns.name.from_text('bar.foo', None),
300,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.MX,
'0 blaz.foo')),
(dns.name.from_text('ns1', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.1')),
(dns.name.from_text('ns2', None),
3600,
dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
'10.0.0.2'))]
self.failUnless(l == exl)
def testTTLs(self):
z = dns.zone.from_text(ttl_example_text, 'example.', relativize=True)
n = z['@']
rds = n.get_rdataset(dns.rdataclass.IN, dns.rdatatype.SOA)
self.failUnless(rds.ttl == 3600)
n = z['ns1']
rds = n.get_rdataset(dns.rdataclass.IN, dns.rdatatype.A)
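        # '1d1s' = 86400 + 1 = 86401 seconds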
self.failUnless(rds.ttl == 86401)
n = z['ns2']
rds = n.get_rdataset(dns.rdataclass.IN, dns.rdatatype.A)
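        # '1w1D1h1m1S' = 604800 + 86400 + 3600 + 60 + 1 = 694861 seconds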
self.failUnless(rds.ttl == 694861)
def testNoSOA(self):
def bad():
z = dns.zone.from_text(no_soa_text, 'example.',
relativize=True)
self.failUnlessRaises(dns.zone.NoSOA, bad)
def testNoNS(self):
def bad():
z = dns.zone.from_text(no_ns_text, 'example.',
relativize=True)
self.failUnlessRaises(dns.zone.NoNS, bad)
def testInclude(self):
z1 = dns.zone.from_text(include_text, 'example.', relativize=True,
allow_include=True)
z2 = dns.zone.from_file('example', 'example.', relativize=True)
self.failUnless(z1 == z2)
def testBadDirective(self):
def bad():
z = dns.zone.from_text(bad_directive_text, 'example.',
relativize=True)
self.failUnlessRaises(dns.exception.SyntaxError, bad)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
glennq/scikit-learn | sklearn/neural_network/multilayer_perceptron.py | 9 | 50002 | """Multi-layer Perceptron
"""
# Authors: Issam H. Laradji <issam.laradji@gmail.com>
# Andreas Mueller
# Jiyuan Qian
# License: BSD 3 clause
import numpy as np
from abc import ABCMeta, abstractmethod
from scipy.optimize import fmin_l_bfgs_b
import warnings
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ._base import ACTIVATIONS, DERIVATIVES, LOSS_FUNCTIONS
from ._stochastic_optimizers import SGDOptimizer, AdamOptimizer
from ..model_selection import train_test_split
from ..externals import six
from ..preprocessing import LabelBinarizer
from ..utils import gen_batches, check_random_state
from ..utils import shuffle
from ..utils import check_array, check_X_y, column_or_1d
from ..exceptions import ConvergenceWarning
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.multiclass import _check_partial_fit_first_call, unique_labels
from ..utils.multiclass import type_of_target
_STOCHASTIC_SOLVERS = ['sgd', 'adam']
def _pack(coefs_, intercepts_):
"""Pack the parameters into a single vector."""
return np.hstack([l.ravel() for l in coefs_ + intercepts_])
class BaseMultilayerPerceptron(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for MLP classification and regression.
Warning: This class should not be used directly.
Use derived classes instead.
.. versionadded:: 0.18
"""
@abstractmethod
def __init__(self, hidden_layer_sizes, activation, solver,
alpha, batch_size, learning_rate, learning_rate_init, power_t,
max_iter, loss, shuffle, random_state, tol, verbose,
warm_start, momentum, nesterovs_momentum, early_stopping,
validation_fraction, beta_1, beta_2, epsilon):
self.activation = activation
self.solver = solver
self.alpha = alpha
self.batch_size = batch_size
self.learning_rate = learning_rate
self.learning_rate_init = learning_rate_init
self.power_t = power_t
self.max_iter = max_iter
self.loss = loss
self.hidden_layer_sizes = hidden_layer_sizes
self.shuffle = shuffle
self.random_state = random_state
self.tol = tol
self.verbose = verbose
self.warm_start = warm_start
self.momentum = momentum
self.nesterovs_momentum = nesterovs_momentum
self.early_stopping = early_stopping
self.validation_fraction = validation_fraction
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
def _unpack(self, packed_parameters):
"""Extract the coefficients and intercepts from packed_parameters."""
for i in range(self.n_layers_ - 1):
start, end, shape = self._coef_indptr[i]
self.coefs_[i] = np.reshape(packed_parameters[start:end], shape)
start, end = self._intercept_indptr[i]
self.intercepts_[i] = packed_parameters[start:end]
def _forward_pass(self, activations):
"""Perform a forward pass on the network by computing the values
of the neurons in the hidden layers and the output layer.
Parameters
----------
activations: list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
        Returns
        -------
        activations : list, length = n_layers - 1
            The same list, updated in place; its last element has passed
            through the output activation function (softmax, logistic or
            identity, depending on ``self.out_activation_``).
        """
hidden_activation = ACTIVATIONS[self.activation]
# Iterate over the hidden layers
for i in range(self.n_layers_ - 1):
activations[i + 1] = safe_sparse_dot(activations[i],
self.coefs_[i])
activations[i + 1] += self.intercepts_[i]
# For the hidden layers
if (i + 1) != (self.n_layers_ - 1):
activations[i + 1] = hidden_activation(activations[i + 1])
# For the last layer
output_activation = ACTIVATIONS[self.out_activation_]
activations[i + 1] = output_activation(activations[i + 1])
return activations
def _compute_loss_grad(self, layer, n_samples, activations, deltas,
coef_grads, intercept_grads):
"""Compute the gradient of loss with respect to coefs and intercept for
specified layer.
This function does backpropagation for the specified one layer.
"""
coef_grads[layer] = safe_sparse_dot(activations[layer].T,
deltas[layer])
coef_grads[layer] += (self.alpha * self.coefs_[layer])
coef_grads[layer] /= n_samples
intercept_grads[layer] = np.mean(deltas[layer], 0)
return coef_grads, intercept_grads
def _loss_grad_lbfgs(self, packed_coef_inter, X, y, activations, deltas,
coef_grads, intercept_grads):
"""Compute the MLP loss function and its corresponding derivatives
with respect to the different parameters given in the initialization.
Returned gradients are packed in a single vector so it can be used
in lbfgs
Parameters
----------
        packed_coef_inter : array-like
A vector comprising the flattened coefficients and intercepts.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
activations: list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
deltas : list, length = n_layers - 1
The ith element of the list holds the difference between the
activations of the i + 1 layer and the backpropagated error.
More specifically, deltas are gradients of loss with respect to z
in each layer, where z = wx + b is the value of a particular layer
before passing through the activation function
        coef_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
coefficient parameters of the ith layer in an iteration.
intercept_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
Returns
-------
loss : float
        grad : array-like, shape (number of coefficient and intercept parameters of all layers,)
"""
self._unpack(packed_coef_inter)
loss, coef_grads, intercept_grads = self._backprop(
X, y, activations, deltas, coef_grads, intercept_grads)
self.n_iter_ += 1
grad = _pack(coef_grads, intercept_grads)
return loss, grad
def _backprop(self, X, y, activations, deltas, coef_grads,
intercept_grads):
"""Compute the MLP loss function and its corresponding derivatives
with respect to each parameter: weights and bias vectors.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
activations: list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
deltas : list, length = n_layers - 1
The ith element of the list holds the difference between the
activations of the i + 1 layer and the backpropagated error.
More specifically, deltas are gradients of loss with respect to z
in each layer, where z = wx + b is the value of a particular layer
before passing through the activation function
        coef_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
coefficient parameters of the ith layer in an iteration.
intercept_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
Returns
-------
loss : float
coef_grads : list, length = n_layers - 1
intercept_grads : list, length = n_layers - 1
"""
n_samples = X.shape[0]
# Forward propagate
activations = self._forward_pass(activations)
# Get loss
loss_func_name = self.loss
if loss_func_name == 'log_loss' and self.out_activation_ == 'logistic':
loss_func_name = 'binary_log_loss'
loss = LOSS_FUNCTIONS[loss_func_name](y, activations[-1])
# Add L2 regularization term to loss
values = np.sum(
np.array([np.dot(s.ravel(), s.ravel()) for s in self.coefs_]))
loss += (0.5 * self.alpha) * values / n_samples
# Backward propagate
last = self.n_layers_ - 2
        # The calculation of delta[last] here works with the following
# combinations of output activation and loss function:
# sigmoid and binary cross entropy, softmax and categorical cross
# entropy, and identity with squared loss
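        # For these pairings, the gradient of the loss with respect to the
        # output layer's pre-activation simplifies to (activations[-1] - y).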
deltas[last] = activations[-1] - y
# Compute gradient for the last layer
coef_grads, intercept_grads = self._compute_loss_grad(
last, n_samples, activations, deltas, coef_grads, intercept_grads)
# Iterate over the hidden layers
for i in range(self.n_layers_ - 2, 0, -1):
deltas[i - 1] = safe_sparse_dot(deltas[i], self.coefs_[i].T)
inplace_derivative = DERIVATIVES[self.activation]
inplace_derivative(activations[i], deltas[i - 1])
coef_grads, intercept_grads = self._compute_loss_grad(
i - 1, n_samples, activations, deltas, coef_grads,
intercept_grads)
return loss, coef_grads, intercept_grads
def _initialize(self, y, layer_units):
# set all attributes, allocate weights etc for first call
# Initialize parameters
self.n_iter_ = 0
self.t_ = 0
self.n_outputs_ = y.shape[1]
# Compute the number of layers
self.n_layers_ = len(layer_units)
# Output for regression
if not isinstance(self, ClassifierMixin):
self.out_activation_ = 'identity'
# Output for multi class
elif self._label_binarizer.y_type_ == 'multiclass':
self.out_activation_ = 'softmax'
# Output for binary class and multi-label
else:
self.out_activation_ = 'logistic'
# Initialize coefficient and intercept layers
self.coefs_ = []
self.intercepts_ = []
for i in range(self.n_layers_ - 1):
coef_init, intercept_init = self._init_coef(layer_units[i],
layer_units[i + 1])
self.coefs_.append(coef_init)
self.intercepts_.append(intercept_init)
if self.solver in _STOCHASTIC_SOLVERS:
self.loss_curve_ = []
self._no_improvement_count = 0
if self.early_stopping:
self.validation_scores_ = []
self.best_validation_score_ = -np.inf
else:
self.best_loss_ = np.inf
def _init_coef(self, fan_in, fan_out):
if self.activation == 'logistic':
# Use the initialization method recommended by
# Glorot et al.
init_bound = np.sqrt(2. / (fan_in + fan_out))
elif self.activation in ('identity', 'tanh', 'relu'):
init_bound = np.sqrt(6. / (fan_in + fan_out))
else:
# this was caught earlier, just to make sure
raise ValueError("Unknown activation function %s" %
self.activation)
coef_init = self._random_state.uniform(-init_bound, init_bound,
(fan_in, fan_out))
intercept_init = self._random_state.uniform(-init_bound, init_bound,
fan_out)
return coef_init, intercept_init
def _fit(self, X, y, incremental=False):
# Make sure self.hidden_layer_sizes is a list
hidden_layer_sizes = self.hidden_layer_sizes
if not hasattr(hidden_layer_sizes, "__iter__"):
hidden_layer_sizes = [hidden_layer_sizes]
hidden_layer_sizes = list(hidden_layer_sizes)
# Validate input parameters.
self._validate_hyperparameters()
if np.any(np.array(hidden_layer_sizes) <= 0):
raise ValueError("hidden_layer_sizes must be > 0, got %s." %
hidden_layer_sizes)
X, y = self._validate_input(X, y, incremental)
n_samples, n_features = X.shape
# Ensure y is 2D
if y.ndim == 1:
y = y.reshape((-1, 1))
self.n_outputs_ = y.shape[1]
layer_units = ([n_features] + hidden_layer_sizes +
[self.n_outputs_])
# check random state
self._random_state = check_random_state(self.random_state)
if not hasattr(self, 'coefs_') or (not self.warm_start and not
incremental):
# First time training the model
self._initialize(y, layer_units)
# lbfgs does not support mini-batches
if self.solver == 'lbfgs':
batch_size = n_samples
elif self.batch_size == 'auto':
batch_size = min(200, n_samples)
else:
if self.batch_size < 1 or self.batch_size > n_samples:
warnings.warn("Got `batch_size` less than 1 or larger than "
"sample size. It is going to be clipped")
batch_size = np.clip(self.batch_size, 1, n_samples)
# Initialize lists
activations = [X]
activations.extend(np.empty((batch_size, n_fan_out))
for n_fan_out in layer_units[1:])
deltas = [np.empty_like(a_layer) for a_layer in activations]
coef_grads = [np.empty((n_fan_in_, n_fan_out_)) for n_fan_in_,
n_fan_out_ in zip(layer_units[:-1],
layer_units[1:])]
intercept_grads = [np.empty(n_fan_out_) for n_fan_out_ in
layer_units[1:]]
# Run the Stochastic optimization solver
if self.solver in _STOCHASTIC_SOLVERS:
self._fit_stochastic(X, y, activations, deltas, coef_grads,
intercept_grads, layer_units, incremental)
# Run the LBFGS solver
elif self.solver == 'lbfgs':
self._fit_lbfgs(X, y, activations, deltas, coef_grads,
intercept_grads, layer_units)
return self
def _validate_hyperparameters(self):
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False, got %s." %
self.shuffle)
if self.max_iter <= 0:
raise ValueError("max_iter must be > 0, got %s." % self.max_iter)
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0, got %s." % self.alpha)
if (self.learning_rate in ["constant", "invscaling", "adaptive"] and
self.learning_rate_init <= 0.0):
raise ValueError("learning_rate_init must be > 0, got %s." %
                             self.learning_rate_init)
if self.momentum > 1 or self.momentum < 0:
raise ValueError("momentum must be >= 0 and <= 1, got %s" %
self.momentum)
if not isinstance(self.nesterovs_momentum, bool):
raise ValueError("nesterovs_momentum must be either True or False,"
" got %s." % self.nesterovs_momentum)
if not isinstance(self.early_stopping, bool):
raise ValueError("early_stopping must be either True or False,"
" got %s." % self.early_stopping)
if self.validation_fraction < 0 or self.validation_fraction >= 1:
raise ValueError("validation_fraction must be >= 0 and < 1, "
"got %s" % self.validation_fraction)
if self.beta_1 < 0 or self.beta_1 >= 1:
raise ValueError("beta_1 must be >= 0 and < 1, got %s" %
self.beta_1)
if self.beta_2 < 0 or self.beta_2 >= 1:
raise ValueError("beta_2 must be >= 0 and < 1, got %s" %
self.beta_2)
if self.epsilon <= 0.0:
raise ValueError("epsilon must be > 0, got %s." % self.epsilon)
# raise ValueError if not registered
supported_activations = ('identity', 'logistic', 'tanh', 'relu')
if self.activation not in supported_activations:
raise ValueError("The activation '%s' is not supported. Supported "
"activations are %s." % (self.activation,
supported_activations))
if self.learning_rate not in ["constant", "invscaling", "adaptive"]:
raise ValueError("learning rate %s is not supported. " %
self.learning_rate)
supported_solvers = _STOCHASTIC_SOLVERS + ["lbfgs"]
if self.solver not in supported_solvers:
raise ValueError("The solver %s is not supported. "
" Expected one of: %s" %
(self.solver, ", ".join(supported_solvers)))
def _fit_lbfgs(self, X, y, activations, deltas, coef_grads,
intercept_grads, layer_units):
# Store meta information for the parameters
self._coef_indptr = []
self._intercept_indptr = []
start = 0
# Save sizes and indices of coefficients for faster unpacking
for i in range(self.n_layers_ - 1):
n_fan_in, n_fan_out = layer_units[i], layer_units[i + 1]
end = start + (n_fan_in * n_fan_out)
self._coef_indptr.append((start, end, (n_fan_in, n_fan_out)))
start = end
# Save sizes and indices of intercepts for faster unpacking
for i in range(self.n_layers_ - 1):
end = start + layer_units[i + 1]
self._intercept_indptr.append((start, end))
start = end
# Run LBFGS
packed_coef_inter = _pack(self.coefs_,
self.intercepts_)
if self.verbose is True or self.verbose >= 1:
iprint = 1
else:
iprint = -1
optimal_parameters, self.loss_, d = fmin_l_bfgs_b(
x0=packed_coef_inter,
func=self._loss_grad_lbfgs,
maxfun=self.max_iter,
iprint=iprint,
pgtol=self.tol,
args=(X, y, activations, deltas, coef_grads, intercept_grads))
self._unpack(optimal_parameters)
def _fit_stochastic(self, X, y, activations, deltas, coef_grads,
intercept_grads, layer_units, incremental):
if not incremental or not hasattr(self, '_optimizer'):
params = self.coefs_ + self.intercepts_
if self.solver == 'sgd':
self._optimizer = SGDOptimizer(
params, self.learning_rate_init, self.learning_rate,
self.momentum, self.nesterovs_momentum, self.power_t)
elif self.solver == 'adam':
self._optimizer = AdamOptimizer(
params, self.learning_rate_init, self.beta_1, self.beta_2,
self.epsilon)
# early_stopping in partial_fit doesn't make sense
early_stopping = self.early_stopping and not incremental
if early_stopping:
X, X_val, y, y_val = train_test_split(
X, y, random_state=self._random_state,
test_size=self.validation_fraction)
if isinstance(self, ClassifierMixin):
y_val = self._label_binarizer.inverse_transform(y_val)
else:
X_val = None
y_val = None
n_samples = X.shape[0]
if self.batch_size == 'auto':
batch_size = min(200, n_samples)
else:
batch_size = np.clip(self.batch_size, 1, n_samples)
try:
for it in range(self.max_iter):
X, y = shuffle(X, y, random_state=self._random_state)
accumulated_loss = 0.0
for batch_slice in gen_batches(n_samples, batch_size):
activations[0] = X[batch_slice]
batch_loss, coef_grads, intercept_grads = self._backprop(
X[batch_slice], y[batch_slice], activations, deltas,
coef_grads, intercept_grads)
accumulated_loss += batch_loss * (batch_slice.stop -
batch_slice.start)
# update weights
grads = coef_grads + intercept_grads
self._optimizer.update_params(grads)
self.n_iter_ += 1
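                # Epoch loss: batch losses weighted by batch size, averaged
                # over all samples.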
self.loss_ = accumulated_loss / X.shape[0]
self.t_ += n_samples
self.loss_curve_.append(self.loss_)
if self.verbose:
print("Iteration %d, loss = %.8f" % (self.n_iter_,
self.loss_))
# update no_improvement_count based on training loss or
# validation score according to early_stopping
self._update_no_improvement_count(early_stopping, X_val, y_val)
# for learning rate that needs to be updated at iteration end
self._optimizer.iteration_ends(self.t_)
if self._no_improvement_count > 2:
# not better than last two iterations by tol.
# stop or decrease learning rate
if early_stopping:
msg = ("Validation score did not improve more than "
"tol=%f for two consecutive epochs." % self.tol)
else:
msg = ("Training loss did not improve more than tol=%f"
" for two consecutive epochs." % self.tol)
is_stopping = self._optimizer.trigger_stopping(
msg, self.verbose)
if is_stopping:
break
else:
self._no_improvement_count = 0
if incremental:
break
if self.n_iter_ == self.max_iter:
warnings.warn('Stochastic Optimizer: Maximum iterations'
' reached and the optimization hasn\'t '
                              'converged yet.', ConvergenceWarning)
except KeyboardInterrupt:
warnings.warn("Training interrupted by user.")
if early_stopping:
# restore best weights
self.coefs_ = self._best_coefs
self.intercepts_ = self._best_intercepts
def _update_no_improvement_count(self, early_stopping, X_val, y_val):
if early_stopping:
# compute validation score, use that for stopping
self.validation_scores_.append(self.score(X_val, y_val))
if self.verbose:
print("Validation score: %f" % self.validation_scores_[-1])
# update best parameters
# use validation_scores_, not loss_curve_
# let's hope no-one overloads .score with mse
last_valid_score = self.validation_scores_[-1]
if last_valid_score < (self.best_validation_score_ +
self.tol):
self._no_improvement_count += 1
else:
self._no_improvement_count = 0
if last_valid_score > self.best_validation_score_:
self.best_validation_score_ = last_valid_score
self._best_coefs = [c.copy() for c in self.coefs_]
self._best_intercepts = [i.copy()
for i in self.intercepts_]
else:
if self.loss_curve_[-1] > self.best_loss_ - self.tol:
self._no_improvement_count += 1
else:
self._no_improvement_count = 0
if self.loss_curve_[-1] < self.best_loss_:
self.best_loss_ = self.loss_curve_[-1]
def fit(self, X, y):
"""Fit the model to data matrix X and target y.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
Returns
-------
self : returns a trained MLP model.
"""
return self._fit(X, y, incremental=False)
@property
def partial_fit(self):
"""Fit the model to data matrix X and target y.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
Returns
-------
self : returns a trained MLP model.
"""
if self.solver not in _STOCHASTIC_SOLVERS:
raise AttributeError("partial_fit is only available for stochastic"
" optimizers. %s is not stochastic."
% self.solver)
return self._partial_fit
def _partial_fit(self, X, y, classes=None):
return self._fit(X, y, incremental=True)
def _predict(self, X):
"""Predict using the trained model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
y_pred : array-like, shape (n_samples,) or (n_samples, n_outputs)
The decision function of the samples for each class in the model.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
# Make sure self.hidden_layer_sizes is a list
hidden_layer_sizes = self.hidden_layer_sizes
if not hasattr(hidden_layer_sizes, "__iter__"):
hidden_layer_sizes = [hidden_layer_sizes]
hidden_layer_sizes = list(hidden_layer_sizes)
layer_units = [X.shape[1]] + hidden_layer_sizes + \
[self.n_outputs_]
# Initialize layers
activations = [X]
for i in range(self.n_layers_ - 1):
activations.append(np.empty((X.shape[0],
layer_units[i + 1])))
# forward propagate
self._forward_pass(activations)
y_pred = activations[-1]
return y_pred
class MLPClassifier(BaseMultilayerPerceptron, ClassifierMixin):
"""Multi-layer Perceptron classifier.
This model optimizes the log-loss function using LBFGS or stochastic
gradient descent.
.. versionadded:: 0.18
Parameters
----------
hidden_layer_sizes : tuple, length = n_layers - 2, default (100,)
The ith element represents the number of neurons in the ith
hidden layer.
activation : {'identity', 'logistic', 'tanh', 'relu'}, default 'relu'
Activation function for the hidden layer.
- 'identity', no-op activation, useful to implement linear bottleneck,
returns f(x) = x
- 'logistic', the logistic sigmoid function,
returns f(x) = 1 / (1 + exp(-x)).
- 'tanh', the hyperbolic tan function,
returns f(x) = tanh(x).
- 'relu', the rectified linear unit function,
returns f(x) = max(0, x)
solver : {'lbfgs', 'sgd', 'adam'}, default 'adam'
The solver for weight optimization.
- 'lbfgs' is an optimizer in the family of quasi-Newton methods.
- 'sgd' refers to stochastic gradient descent.
- 'adam' refers to a stochastic gradient-based optimizer proposed
by Kingma, Diederik, and Jimmy Ba
Note: The default solver 'adam' works pretty well on relatively
large datasets (with thousands of training samples or more) in terms of
both training time and validation score.
For small datasets, however, 'lbfgs' can converge faster and perform
better.
alpha : float, optional, default 0.0001
L2 penalty (regularization term) parameter.
batch_size : int, optional, default 'auto'
Size of minibatches for stochastic optimizers.
If the solver is 'lbfgs', the classifier will not use minibatch.
When set to "auto", `batch_size=min(200, n_samples)`
learning_rate : {'constant', 'invscaling', 'adaptive'}, default 'constant'
Learning rate schedule for weight updates.
- 'constant' is a constant learning rate given by
'learning_rate_init'.
- 'invscaling' gradually decreases the learning rate ``learning_rate_``
at each time step 't' using an inverse scaling exponent of 'power_t'.
effective_learning_rate = learning_rate_init / pow(t, power_t)
- 'adaptive' keeps the learning rate constant to
'learning_rate_init' as long as training loss keeps decreasing.
Each time two consecutive epochs fail to decrease training loss by at
least tol, or fail to increase validation score by at least tol if
'early_stopping' is on, the current learning rate is divided by 5.
Only used when ``solver='sgd'``.
max_iter : int, optional, default 200
Maximum number of iterations. The solver iterates until convergence
(determined by 'tol') or this number of iterations.
random_state : int or RandomState, optional, default None
State or seed for random number generator.
shuffle : bool, optional, default True
Whether to shuffle samples in each iteration. Only used when
solver='sgd' or 'adam'.
tol : float, optional, default 1e-4
Tolerance for the optimization. When the loss or score is not improving
by at least tol for two consecutive iterations, unless `learning_rate`
is set to 'adaptive', convergence is considered to be reached and
training stops.
learning_rate_init : double, optional, default 0.001
The initial learning rate used. It controls the step-size
in updating the weights. Only used when solver='sgd' or 'adam'.
power_t : double, optional, default 0.5
The exponent for inverse scaling learning rate.
It is used in updating effective learning rate when the learning_rate
is set to 'invscaling'. Only used when solver='sgd'.
verbose : bool, optional, default False
Whether to print progress messages to stdout.
warm_start : bool, optional, default False
When set to True, reuse the solution of the previous
call to fit as initialization, otherwise, just erase the
previous solution.
momentum : float, default 0.9
Momentum for gradient descent update. Should be between 0 and 1. Only
used when solver='sgd'.
nesterovs_momentum : boolean, default True
Whether to use Nesterov's momentum. Only used when solver='sgd' and
momentum > 0.
early_stopping : bool, default False
Whether to use early stopping to terminate training when validation
score is not improving. If set to true, it will automatically set
aside 10% of training data as validation and terminate training when
validation score is not improving by at least tol for two consecutive
epochs.
Only effective when solver='sgd' or 'adam'
validation_fraction : float, optional, default 0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True
beta_1 : float, optional, default 0.9
Exponential decay rate for estimates of first moment vector in adam,
should be in [0, 1). Only used when solver='adam'
beta_2 : float, optional, default 0.999
Exponential decay rate for estimates of second moment vector in adam,
should be in [0, 1). Only used when solver='adam'
epsilon : float, optional, default 1e-8
Value for numerical stability in adam. Only used when solver='adam'
Attributes
----------
`classes_` : array or list of array of shape (n_classes,)
Class labels for each output.
`loss_` : float
The current loss computed with the loss function.
`coefs_` : list, length n_layers - 1
The ith element in the list represents the weight matrix corresponding
to layer i.
`intercepts_` : list, length n_layers - 1
The ith element in the list represents the bias vector corresponding to
layer i + 1.
n_iter_ : int,
        The number of iterations the solver has run.
n_layers_ : int
Number of layers.
`n_outputs_` : int
Number of outputs.
`out_activation_` : string
Name of the output activation function.
Notes
-----
MLPClassifier trains iteratively since at each time step
the partial derivatives of the loss function with respect to the model
parameters are computed to update the parameters.
It can also have a regularization term added to the loss function
that shrinks model parameters to prevent overfitting.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values.
References
----------
Hinton, Geoffrey E.
"Connectionist learning procedures." Artificial intelligence 40.1
(1989): 185-234.
Glorot, Xavier, and Yoshua Bengio. "Understanding the difficulty of
training deep feedforward neural networks." International Conference
on Artificial Intelligence and Statistics. 2010.
He, Kaiming, et al. "Delving deep into rectifiers: Surpassing human-level
performance on imagenet classification." arXiv preprint
arXiv:1502.01852 (2015).
Kingma, Diederik, and Jimmy Ba. "Adam: A method for stochastic
optimization." arXiv preprint arXiv:1412.6980 (2014).
"""
def __init__(self, hidden_layer_sizes=(100,), activation="relu",
solver='adam', alpha=0.0001,
batch_size='auto', learning_rate="constant",
learning_rate_init=0.001, power_t=0.5, max_iter=200,
shuffle=True, random_state=None, tol=1e-4,
verbose=False, warm_start=False, momentum=0.9,
nesterovs_momentum=True, early_stopping=False,
validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
epsilon=1e-8):
sup = super(MLPClassifier, self)
sup.__init__(hidden_layer_sizes=hidden_layer_sizes,
activation=activation, solver=solver, alpha=alpha,
batch_size=batch_size, learning_rate=learning_rate,
learning_rate_init=learning_rate_init, power_t=power_t,
max_iter=max_iter, loss='log_loss', shuffle=shuffle,
random_state=random_state, tol=tol, verbose=verbose,
warm_start=warm_start, momentum=momentum,
nesterovs_momentum=nesterovs_momentum,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
def _validate_input(self, X, y, incremental):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
multi_output=True)
if y.ndim == 2 and y.shape[1] == 1:
y = column_or_1d(y, warn=True)
if not incremental:
self._label_binarizer = LabelBinarizer()
self._label_binarizer.fit(y)
self.classes_ = self._label_binarizer.classes_
else:
classes = unique_labels(y)
            if len(np.setdiff1d(classes, self.classes_, assume_unique=True)):
raise ValueError("`y` has classes not in `self.classes_`."
" `self.classes_` has %s. 'y' has %s." %
(self.classes_, classes))
y = self._label_binarizer.transform(y)
return X, y
def predict(self, X):
"""Predict using the multi-layer perceptron classifier
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
y : array-like, shape (n_samples,) or (n_samples, n_classes)
The predicted classes.
"""
check_is_fitted(self, "coefs_")
y_pred = self._predict(X)
if self.n_outputs_ == 1:
y_pred = y_pred.ravel()
return self._label_binarizer.inverse_transform(y_pred)
@property
def partial_fit(self):
"""Fit the model to data matrix X and target y.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
classes : array, shape (n_classes)
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns a trained MLP model.
"""
if self.solver not in _STOCHASTIC_SOLVERS:
raise AttributeError("partial_fit is only available for stochastic"
" optimizer. %s is not stochastic"
% self.solver)
return self._partial_fit
def _partial_fit(self, X, y, classes=None):
if _check_partial_fit_first_call(self, classes):
self._label_binarizer = LabelBinarizer()
if type_of_target(y).startswith('multilabel'):
self._label_binarizer.fit(y)
else:
self._label_binarizer.fit(classes)
super(MLPClassifier, self)._partial_fit(X, y)
return self
def predict_log_proba(self, X):
"""Return the log of probability estimates.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input data.
Returns
-------
log_y_prob : array-like, shape (n_samples, n_classes)
The predicted log-probability of the sample for each class
in the model, where classes are ordered as they are in
`self.classes_`. Equivalent to log(predict_proba(X))
"""
y_prob = self.predict_proba(X)
return np.log(y_prob, out=y_prob)
def predict_proba(self, X):
"""Probability estimates.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
y_prob : array-like, shape (n_samples, n_classes)
The predicted probability of the sample for each class in the
model, where classes are ordered as they are in `self.classes_`.
"""
check_is_fitted(self, "coefs_")
y_pred = self._predict(X)
if self.n_outputs_ == 1:
y_pred = y_pred.ravel()
if y_pred.ndim == 1:
return np.vstack([1 - y_pred, y_pred]).T
else:
return y_pred
class MLPRegressor(BaseMultilayerPerceptron, RegressorMixin):
"""Multi-layer Perceptron regressor.
This model optimizes the squared-loss using LBFGS or stochastic gradient
descent.
.. versionadded:: 0.18
Parameters
----------
hidden_layer_sizes : tuple, length = n_layers - 2, default (100,)
The ith element represents the number of neurons in the ith
hidden layer.
activation : {'identity', 'logistic', 'tanh', 'relu'}, default 'relu'
Activation function for the hidden layer.
- 'identity', no-op activation, useful to implement linear bottleneck,
returns f(x) = x
- 'logistic', the logistic sigmoid function,
returns f(x) = 1 / (1 + exp(-x)).
- 'tanh', the hyperbolic tan function,
returns f(x) = tanh(x).
- 'relu', the rectified linear unit function,
returns f(x) = max(0, x)
solver : {'lbfgs', 'sgd', 'adam'}, default 'adam'
The solver for weight optimization.
- 'lbfgs' is an optimizer in the family of quasi-Newton methods.
- 'sgd' refers to stochastic gradient descent.
- 'adam' refers to a stochastic gradient-based optimizer proposed by
Kingma, Diederik, and Jimmy Ba
Note: The default solver 'adam' works pretty well on relatively
large datasets (with thousands of training samples or more) in terms of
both training time and validation score.
For small datasets, however, 'lbfgs' can converge faster and perform
better.
alpha : float, optional, default 0.0001
L2 penalty (regularization term) parameter.
batch_size : int, optional, default 'auto'
Size of minibatches for stochastic optimizers.
If the solver is 'lbfgs', the classifier will not use minibatch.
When set to "auto", `batch_size=min(200, n_samples)`
learning_rate : {'constant', 'invscaling', 'adaptive'}, default 'constant'
Learning rate schedule for weight updates.
- 'constant' is a constant learning rate given by
'learning_rate_init'.
- 'invscaling' gradually decreases the learning rate ``learning_rate_``
at each time step 't' using an inverse scaling exponent of 'power_t'.
effective_learning_rate = learning_rate_init / pow(t, power_t)
- 'adaptive' keeps the learning rate constant to
'learning_rate_init' as long as training loss keeps decreasing.
Each time two consecutive epochs fail to decrease training loss by at
least tol, or fail to increase validation score by at least tol if
'early_stopping' is on, the current learning rate is divided by 5.
Only used when solver='sgd'.
max_iter : int, optional, default 200
Maximum number of iterations. The solver iterates until convergence
(determined by 'tol') or this number of iterations.
random_state : int or RandomState, optional, default None
State or seed for random number generator.
shuffle : bool, optional, default True
Whether to shuffle samples in each iteration. Only used when
solver='sgd' or 'adam'.
tol : float, optional, default 1e-4
Tolerance for the optimization. When the loss or score is not improving
by at least tol for two consecutive iterations, unless `learning_rate`
is set to 'adaptive', convergence is considered to be reached and
training stops.
learning_rate_init : double, optional, default 0.001
The initial learning rate used. It controls the step-size
in updating the weights. Only used when solver='sgd' or 'adam'.
power_t : double, optional, default 0.5
The exponent for inverse scaling learning rate.
It is used in updating effective learning rate when the learning_rate
is set to 'invscaling'. Only used when solver='sgd'.
verbose : bool, optional, default False
Whether to print progress messages to stdout.
warm_start : bool, optional, default False
When set to True, reuse the solution of the previous
call to fit as initialization, otherwise, just erase the
previous solution.
momentum : float, default 0.9
Momentum for gradient descent update. Should be between 0 and 1. Only
used when solver='sgd'.
nesterovs_momentum : boolean, default True
Whether to use Nesterov's momentum. Only used when solver='sgd' and
momentum > 0.
early_stopping : bool, default False
Whether to use early stopping to terminate training when validation
score is not improving. If set to true, it will automatically set
aside 10% of training data as validation and terminate training when
validation score is not improving by at least tol for two consecutive
epochs.
Only effective when solver='sgd' or 'adam'
validation_fraction : float, optional, default 0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True
beta_1 : float, optional, default 0.9
Exponential decay rate for estimates of first moment vector in adam,
should be in [0, 1). Only used when solver='adam'
beta_2 : float, optional, default 0.999
Exponential decay rate for estimates of second moment vector in adam,
should be in [0, 1). Only used when solver='adam'
epsilon : float, optional, default 1e-8
Value for numerical stability in adam. Only used when solver='adam'
Attributes
----------
`loss_` : float
The current loss computed with the loss function.
`coefs_` : list, length n_layers - 1
The ith element in the list represents the weight matrix corresponding
to layer i.
`intercepts_` : list, length n_layers - 1
The ith element in the list represents the bias vector corresponding to
layer i + 1.
n_iter_ : int,
        The number of iterations the solver has run.
n_layers_ : int
Number of layers.
`n_outputs_` : int
Number of outputs.
`out_activation_` : string
Name of the output activation function.
Notes
-----
MLPRegressor trains iteratively since at each time step
the partial derivatives of the loss function with respect to the model
parameters are computed to update the parameters.
It can also have a regularization term added to the loss function
that shrinks model parameters to prevent overfitting.
    This implementation works with data represented as dense numpy arrays or
    sparse scipy arrays of floating point values.
References
----------
Hinton, Geoffrey E.
"Connectionist learning procedures." Artificial intelligence 40.1
(1989): 185-234.
Glorot, Xavier, and Yoshua Bengio. "Understanding the difficulty of
training deep feedforward neural networks." International Conference
on Artificial Intelligence and Statistics. 2010.
He, Kaiming, et al. "Delving deep into rectifiers: Surpassing human-level
performance on imagenet classification." arXiv preprint
arXiv:1502.01852 (2015).
Kingma, Diederik, and Jimmy Ba. "Adam: A method for stochastic
optimization." arXiv preprint arXiv:1412.6980 (2014).
"""
def __init__(self, hidden_layer_sizes=(100,), activation="relu",
solver='adam', alpha=0.0001,
batch_size='auto', learning_rate="constant",
learning_rate_init=0.001,
power_t=0.5, max_iter=200, shuffle=True,
random_state=None, tol=1e-4,
verbose=False, warm_start=False, momentum=0.9,
nesterovs_momentum=True, early_stopping=False,
validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
epsilon=1e-8):
sup = super(MLPRegressor, self)
sup.__init__(hidden_layer_sizes=hidden_layer_sizes,
activation=activation, solver=solver, alpha=alpha,
batch_size=batch_size, learning_rate=learning_rate,
learning_rate_init=learning_rate_init, power_t=power_t,
max_iter=max_iter, loss='squared_loss', shuffle=shuffle,
random_state=random_state, tol=tol, verbose=verbose,
warm_start=warm_start, momentum=momentum,
nesterovs_momentum=nesterovs_momentum,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
def predict(self, X):
"""Predict using the multi-layer perceptron model.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
y : array-like, shape (n_samples, n_outputs)
The predicted values.
"""
check_is_fitted(self, "coefs_")
y_pred = self._predict(X)
if y_pred.shape[1] == 1:
return y_pred.ravel()
return y_pred
def _validate_input(self, X, y, incremental):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
multi_output=True, y_numeric=True)
if y.ndim == 2 and y.shape[1] == 1:
y = column_or_1d(y, warn=True)
return X, y
| bsd-3-clause |
pianomania/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 103 | 5681 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high dimensions, as will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding a biased
estimate of the data structure that is nevertheless accurate to some extent.
The One-Class SVM does not assume any parametric form of the data distribution
and can therefore model the complex shape of the data much better.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrate on a relevant cluster when another one exists. Here, many
observations are confounded into one cluster and break down the empirical
covariance estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Even so, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list(legend1.values())
legend1_keys_list = list(legend1.keys())
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list(legend2.values())
legend2_keys_list = list(legend2.keys())
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
(legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
PatrickOReilly/scikit-learn | examples/covariance/plot_covariance_estimation.py | 97 | 5074 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices made by
cross-validation and by the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is computational not costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.model_selection import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
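# Illustrative aside (added, not part of the original example): the shrinkage
# estimators compared here blend the empirical covariance with a scaled
# identity,
#   shrunk_cov = (1 - shrinkage) * emp_cov + shrinkage * mu * I,
# where mu = trace(emp_cov) / n_features. For instance, with an arbitrary
# shrinkage of 0.1:
mu_example = np.trace(emp_cov) / n_features
shrunk_cov_example = (1. - 0.1) * emp_cov + 0.1 * mu_example * np.eye(n_features)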
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
giorgiop/scikit-learn | examples/cluster/plot_cluster_comparison.py | 57 | 4681 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
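# (added note: kneighbors_graph is not symmetric in general, so averaging it
#  with its transpose yields an undirected connectivity matrix for the
#  agglomerative/Ward estimators built below)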
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
PythonProgramming/Unsupervised-Machine-Learning-Basics | hierarchical_clustering_with_mean_shift.py | 2 | 1183 | import numpy as np
from sklearn.cluster import MeanShift
from sklearn.datasets.samples_generator import make_blobs
import matplotlib.pyplot as plt
from matplotlib import style
style.use("ggplot")
# centers = [[1,1],[5,5]]
# centers = [[1,1],[5,5],[3,8]]
centers = [[1,1],[5,5],[3,10]]
# "_" to ignore labels and note there is a max of 10,000 samples:
# X, _ = make_blobs(n_samples=200, centers=centers, cluster_std=1)
# X, _ = make_blobs(n_samples=500, centers=centers, cluster_std=1)
# X, _ = make_blobs(n_samples=500, centers=centers, cluster_std=5)
X, _ = make_blobs(n_samples=500, centers=centers, cluster_std=0.3)
plt.scatter(X[:,0], X[:,1])
plt.show()
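# (added note: constructing MeanShift() without a bandwidth lets fit() estimate
#  one automatically from the data via sklearn.cluster.estimate_bandwidth)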
ms = MeanShift()
ms.fit(X)
labels = ms.labels_ # not the same as the "_" labels above
cluster_centers = ms.cluster_centers_
print(cluster_centers)
n_clusters = len(np.unique(labels))
print("Number of estimated clusters:", n_clusters)
colors = 10*['r.','g.','b.','c.','k.','y.','m.']
print(colors)
print(labels)
for i in range(len(X)):
plt.plot(X[i][0], X[i][1], colors[labels[i]], markersize=10)
plt.scatter(
cluster_centers[:,0],cluster_centers[:,1],
marker="x", s=150, linewidths=5, zorder=10
)
plt.show()
| mit |
giorgiop/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 47 | 3653 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_parallel():
ms1 = MeanShift(n_jobs=2)
ms1.fit(X)
ms2 = MeanShift()
ms2.fit(X)
assert_array_equal(ms1.cluster_centers_, ms2.cluster_centers_)
assert_array_equal(ms1.labels_, ms2.labels_)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
# Non-regression: before fit, there should be not fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
| bsd-3-clause |
theoryno3/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 212 | 11911 | import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0]+1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent as promised in the doc but the lowest most frequent. If this
# test fails after an update of scipy, Imputer will need to be updated
# to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = grid_search.GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
| bsd-3-clause |
pianomania/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 72 | 2264 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
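# (added note, not part of the original solution: with analyzer='char' and
#  ngram_range=(1, 3), a string such as "cat" contributes the character
#  n-grams 'c', 'a', 't', 'ca', 'at' and 'cat'; their term frequencies form
#  the language 'fingerprints' mentioned above)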
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import matplotlib.pyplot as plt
#plt.matshow(cm, cmap=plt.cm.jet)
#plt.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
cainiaocome/scikit-learn | sklearn/datasets/__init__.py | 175 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
pianomania/scikit-learn | sklearn/utils/graph.py | 24 | 6326 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <hagberg@lanl.gov>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph : sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> list(sorted(single_source_shortest_path_length(graph, 0).items()))
[(0, 0), (1, 1), (2, 2), (3, 3)]
>>> graph = np.ones((6, 6))
>>> list(sorted(single_source_shortest_path_length(graph, 2).items()))
[(0, 1), (1, 1), (2, 0), (3, 1), (4, 1), (5, 1)]
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # list of nodes to check at the next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |
pianomania/scikit-learn | benchmarks/bench_plot_lasso_path.py | 82 | 4005 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features // 10,
'effective_rank': min(n_samples, n_features) // 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plots do not
# support legends (yet?)
# ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
# ax.legend()
i += 1
plt.show()
| bsd-3-clause |
TransportLayer/mc-id2name | id2name.py | 1 | 45678 | ###############################################################################
# Minecraft ID to Friendly Name #
# Copyright (C) 2016 TransportLayer #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
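# Usage sketch (added note, not part of the original file): '_example_id2name'
# below is a hypothetical helper showing how the 'items' table that follows is
# meant to be read; it is not an existing API of this module.
def _example_id2name(namespace, item, damage_value=0, table=None):
    """Return the friendly display name for an item, or None if unknown."""
    entry = (table if table is not None else items).get(namespace, {}).get(item)
    if not isinstance(entry, dict):
        return None
    # Tools and weapons store a single 'name'; blocks map damage values to names.
    return entry.get('name', entry.get(damage_value))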
items = {
'minecraft': {
'__VERSION__': 1.10,
'__LANGUAGE__': 'en_US',
'stone': {
'id': 1,
'category': 'Building Blocks',
0: 'Stone',
1: 'Granite',
2: 'Polished Granite',
3: 'Diorite',
4: 'Polished Diorite',
5: 'Andesite',
6: 'Polished Andesite'
},
'grass': {
'id': 2,
'category': 'Building Blocks',
0: 'Grass Block'
},
'dirt': {
'id': 3,
'category': 'Building Blocks',
0: 'Dirt',
1: 'Coarse Dirt',
2: 'Podzol'
},
'cobblestone': {
'id': 4,
'category': 'Building Blocks',
0: 'Cobblestone'
},
'planks': {
'id': 5,
'category': 'Building Blocks',
0: 'Oak Wood Planks',
1: 'Spruce Wood Planks',
2: 'Birch Wood Planks',
3: 'Jungle Wood Planks',
4: 'Acacia Wood Planks',
5: 'Dark Oak Wood Planks'
},
'sapling': {
'id': 6,
'category': 'Decoration Blocks',
0: 'Oak Sapling',
1: 'Spruce Sapling',
2: 'Birch Sapling',
3: 'Jungle Sapling',
4: 'Acacia Sapling',
5: 'Dark Oak Sapling'
},
'bedrock': {
'id': 7,
'category': 'Building Blocks',
0: 'Bedrock'
},
# No item 8?
# No item 9?
# No item 10?
# No item 11?
'sand': {
'id': 12,
'category': 'Building Blocks',
0: 'Sand',
1: 'Red Sand'
},
'gravel': {
'id': 13,
'category': 'Building Blocks',
0: 'Gravel'
},
'gold_ore': {
'id': 14,
'category': 'Building Blocks',
0: 'Gold Ore'
},
'iron_ore': {
'id': 15,
'category': 'Building Blocks',
0: 'Iron Ore'
},
'coal_ore': {
'id': 16,
'category': 'Building Blocks',
0: 'Coal Ore'
},
'log': {
'id': 17,
'category': 'Building Blocks',
0: 'Oak Wood',
1: 'Spruce Wood',
2: 'Birch Wood',
3: 'Jungle Wood'
},
'leaves': {
'id': 18,
'category': 'Decoration Blocks',
0: 'Oak Leaves',
1: 'Spruce Leaves',
2: 'Birch Leaves',
3: 'Jungle Leaves'
},
'sponge': {
'id': 19,
'category': 'Building Blocks',
0: 'Sponge',
1: 'Wet Sponge'
},
'glass': {
'id': 20,
'category': 'Building Blocks',
0: 'Glass'
},
'lapis_ore': {
'id': 21,
'category': 'Building Blocks',
0: 'Lapis Lazuli Ore'
},
'lapis_block': {
'id': 22,
'category': 'Building Blocks',
0: 'Lapis Lazuli Block'
},
'dispenser': {
'id': 23,
'category': 'Redstone',
0: 'Dispenser'
},
'sandstone': {
'id': 24,
'category': 'Building Blocks',
0: 'Sandstone',
1: 'Chiseled Sandstone',
2: 'Smooth Sandstone'
},
'noteblock': {
'id': 25,
'category': 'Redstone',
0: 'Note Block'
},
# No item 26?
'golden_rail': {
'id': 27,
'category': 'Transportation',
0: 'Powered Rail'
},
'detector_rail': {
'id': 28,
'category': 'Transportation',
0: 'Detector Rail'
},
'sticky_piston': {
'id': 29,
'category': 'Redstone',
0: 'Sticky Piston'
},
'web': {
'id': 30,
'category': 'Decoration Blocks',
0: 'Cobweb'
},
'tallgrass': {
'id': 31,
'category': 'Decoration Blocks',
# Missing DV 0?
1: 'Grass',
2: 'Fern'
},
'deadbush': {
'id': 32,
'category': 'Decoration Blocks',
0: 'Dead Bush'
},
'piston': {
'id': 33,
'category': 'Redstone',
0: 'Piston'
},
# No item 34?
'wool': {
'id': 35,
'category': 'Building Blocks',
0: 'Wool',
1: 'Orange Wool',
2: 'Magenta Wool',
3: 'Light Blue Wool',
4: 'Yellow Wool',
5: 'Lime Wool',
6: 'Pink Wool',
7: 'Gray Wool',
8: 'Light Gray Wool',
9: 'Cyan Wool',
10: 'Purple Wool',
11: 'Blue Wool',
12: 'Brown Wool',
13: 'Green Wool',
14: 'Red Wool',
15: 'Black Wool'
},
# No item 36?
'yellow_flower': {
'id': 37,
'category': 'Decoration Blocks',
0: 'Dandelion'
# Marked for more DVs.
},
'red_flower': {
'id': 38,
'category': 'Decoration Blocks',
0: 'Poppy',
1: 'Blue Orchid', # Not red.
2: 'Allium', # Also not red.
3: 'Azure Bluet', # Still not red.
4: 'Red Tulip', # Wow, good job, this one's red.
5: 'Orange Tulip', # Closer to red...?
6: 'White Tulip', # Farther from red.
7: 'Pink Tulip', # Ah, there we go, back on track.
8: 'Oxeye Daisy' # I give up at this point.
},
'brown_mushroom': {
'id': 39,
'category': 'Decoration Blocks',
0: 'Mushroom'
},
'red_mushroom': {
'id': 40,
'category': 'Decoration Blocks',
0: 'Mushroom'
},
'gold_block': {
'id': 41,
'category': 'Building Blocks',
0: 'Block of Gold'
},
'iron_block': {
'id': 42,
'category': 'Building Blocks',
0: 'Block of Iron'
},
# No item 43?
'stone_slab': {
'id': 44,
'category': 'Building Blocks',
0: 'Stone Slab',
1: 'Sandstone Slab',
# No DV 2?
3: 'Cobblestone Slab',
4: 'Bricks Slab',
5: 'Stone Bricks Slab',
6: 'Nether Brick Slab',
7: 'Quartz Slab'
},
'brick_block': {
'id': 45,
'category': 'Building Blocks',
0: 'Bricks'
},
'tnt': {
'id': 46,
'category': 'Redstone',
0: 'TNT'
},
'bookshelf': {
'id': 47,
'category': 'Building Blocks',
0: 'Bookshelf'
},
'mossy_cobblestone': {
'id': 48,
'category': 'Building Blocks',
0: 'Moss Stone'
},
'obsidian': {
'id': 49,
'category': 'Building Blocks',
0: 'Obsidian'
},
'torch': {
'id': 50,
'category': 'Decoration Blocks',
0: 'Torch'
},
# No item 51?
# No item 52?
'oak_stairs': {
'id': 53,
'category': 'Building Blocks',
0: 'Oak Wood Stairs'
},
'chest': {
'id': 54,
'category': 'Decoration Blocks',
0: 'Chest'
},
# No item 55?
'diamond_ore': {
'id': 56,
'category': 'Building Blocks',
0: 'Diamond Ore'
},
'diamond_block': {
'id': 57,
'category': 'Building Blocks',
0: 'Block of Diamond'
},
'crafting_table': {
'id': 58,
'category': 'Decoration Blocks',
0: 'Crafting Table'
},
# No item 59?
# No item 60?
'furnace': {
'id': 61,
'category': 'Decoration Blocks',
0: 'Furnace'
},
# No item 62?
# No item 63?
# No item 64?
'ladder': {
'id': 65,
'category': 'Decoration Blocks',
0: 'Ladder'
},
'rail': {
'id': 66,
'category': 'Transportation',
0: 'Rail'
},
'stone_stairs': {
'id': 67,
'category': 'Building Blocks',
0: 'Cobblestone Stairs'
},
# No item 68?
'lever': {
'id': 69,
'category': 'Redstone',
0: 'Lever'
},
'stone_pressure_plate': {
'id': 70,
'category': 'Redstone',
0: 'Stone Pressure Plate'
},
# No item 71?
'wooden_pressure_plate': {
'id': 72,
'category': 'Redstone',
0: 'Wooden Pressure Plate'
},
'redstone_ore': {
'id': 73,
'category': 'Building Blocks',
0: 'Redstone Ore'
},
# No item 74?
# No item 75?
'redstone_torch': {
'id': 76,
'category': 'Redstone',
0: 'Redstone Torch'
},
'stone_button': {
'id': 77,
'category': 'Redstone',
0: 'Button'
},
'snow_layer': {
'id': 78,
'category': 'Decoration Blocks',
0: 'Snow'
# Marked for more DVs.
},
'ice': {
'id': 79,
'category': 'Building Blocks',
0: 'Ice'
},
'snow': {
'id': 80,
'category': 'Building Blocks',
0: 'Snow'
},
'cactus': {
'id': 81,
'category': 'Decoration Blocks',
0: 'Cactus'
},
'clay': {
'id': 82,
'category': 'Building Blocks',
0: 'Clay'
},
# No item 83?
'jukebox': {
'id': 84,
'category': 'Decoration Blocks',
0: 'Jukebox'
},
'fence': {
'id': 85,
'category': 'Decoration Blocks',
0: 'Oak Fence'
},
'pumpkin': {
'id': 86,
'category': 'Building Blocks',
0: 'Pumpkin'
},
'netherrack': {
'id': 87,
'category': 'Building Blocks',
0: 'Netherrack'
},
'soul_sand': {
'id': 88,
'category': 'Building Blocks',
0: 'Soul Sand'
},
'glowstone': {
'id': 89,
'category': 'Building Blocks',
0: 'Glowstone'
},
# No item 90?
'lit_pumpkin': {
'id': 91,
'category': 'Building Blocks',
0: 'Jack o\'Lantern'
},
# No item 92?
# No item 93?
# No item 94?
'stained_glass': {
'id': 95,
'category': 'Building Blocks',
0: 'White Stained Glass',
1: 'Orange Stained Glass',
2: 'Magenta Stained Glass',
3: 'Light Blue Stained Glass',
4: 'Yellow Stained Glass',
5: 'Lime Stained Glass',
6: 'Pink Stained Glass',
7: 'Gray Stained Glass',
8: 'Light Gray Stained Glass',
9: 'Cyan Stained Glass',
10: 'Purple Stained Glass',
11: 'Blue Stained Glass',
12: 'Brown Stained Glass',
13: 'Green Stained Glass',
14: 'Red Stained Glass',
15: 'Black Stained Glass'
},
'trapdoor': {
'id': 96,
'category': 'Redstone',
0: 'Wooden Trapdoor'
},
'monster_egg': {
'id': 97,
'category': 'Decoration Blocks',
0: 'Stone Monster Egg',
1: 'Cobblestone Monster Egg',
2: 'Stone Brick Monster Egg',
3: 'Mossy Stone Brick Monster Egg',
4: 'Cracked Stone Brick Monster Egg',
5: 'Chiseled Stone Brick Monster Egg'
},
'stonebrick': {
'id': 98,
'category': 'Building Blocks',
0: 'Stone Bricks',
1: 'Mossy Stone Bricks',
2: 'Cracked Stone Bricks',
3: 'Chiseled Stone Bricks'
},
# No item 99?
# No item 100?
'iron_bars': {
'id': 101,
'category': 'Decoration Blocks',
0: 'Iron Bars'
},
'glass_pane': {
'id': 102,
'category': 'Decoration Blocks',
0: 'Glass Pane'
},
'melon_block': {
'id': 103,
'category': 'Building Blocks',
0: 'Melon'
},
# No item 104?
# No item 105?
'vine': {
'id': 106,
'category': 'Decoration Blocks',
0: 'Vines'
},
'fence_gate': {
'id': 107,
'category': 'Redstone',
0: 'Oak Fence Gate'
},
'brick_stairs': {
'id': 108,
'category': 'Building Blocks',
0: 'Brick Stairs'
},
'stone_brick_stairs': {
'id': 109,
'category': 'Building Blocks',
0: 'Stone Brick Stairs'
},
'mycelium': {
'id': 110,
'category': 'Building Blocks',
0: 'Mycelium'
},
'waterlily': {
'id': 111,
'category': 'Decoration Blocks',
0: 'Lily Pad'
},
'nether_brick': {
'id': 112,
'category': 'Building Blocks',
0: 'Nether Brick'
},
'nether_brick_fence': {
'id': 113,
'category': 'Decoration Blocks',
0: 'Nether Brick Fence'
},
'nether_brick_stairs': {
'id': 114,
'category': 'Building Blocks',
0: 'Nether Brick Stairs'
},
# No item 115?
'enchanting_table': {
'id': 116,
'category': 'Decoration Blocks',
0: 'Enchantment Table'
},
# No item 117?
# No item 118?
# No item 119?
'end_portal_frame': {
'id': 120,
'category': 'Decoration Blocks',
0: 'End Portal'
},
'end_stone': {
'id': 121,
'category': 'Building Blocks',
0: 'End Stone'
},
'redstone_lamp': {
'id': 123,
'category': 'Redstone',
0: 'Redstone Lamp'
},
# No item 124?
# No item 125?
'wooden_slab': {
'id': 126,
'category': 'Building Blocks',
0: 'Oak Wood Slab',
1: 'Spruce Wood Slab',
2: 'Birch Wood Slab',
3: 'Jungle Wood Slab',
4: 'Acacia Wood Slab',
5: 'Dark Oak Wood Slab'
},
# No item 127?
'sandstone_stairs': {
'id': 128,
'category': 'Building Blocks',
0: 'Sandstone Stairs'
},
'emerald_ore': {
'id': 129,
'category': 'Building Blocks',
0: 'Emerald Ore'
},
'ender_chest': {
'id': 130,
'category': 'Decoration Blocks',
0: 'Ender Chest'
},
'tripwire_hook': {
'id': 131,
'category': 'Redstone',
0: 'Tripwire Hook'
},
# No item 132?
'emerald_block': {
'id': 133,
'category': 'Building Blocks',
0: 'Block of Emerald'
},
'spruce_stairs': {
'id': 134,
'category': 'Building Blocks',
0: 'Spruce Wood Stairs'
},
'birch_stairs': {
'id': 135,
'category': 'Building Blocks',
0: 'Birch Wood Stairs'
},
'jungle_stairs': {
'id': 136,
'category': 'Building Blocks',
0: 'Jungle Wood Stairs'
},
# No item 137?
'beacon': {
'id': 138,
'category': 'Miscellaneous',
0: 'Beacon'
},
'cobblestone_wall': {
'id': 139,
'category': 'Building Blocks',
0: 'Cobblestone Wall',
1: 'Mossy Cobblestone Wall'
},
# No item 140?
# No item 141?
# No item 142?
'wooden_button': {
'id': 143,
'category': 'Redstone',
0: 'Button'
},
# No item 144?
'anvil': {
'id': 145,
'category': 'Decoration Blocks',
0: 'Anvil',
1: 'Slightly Damaged Anvil',
2: 'Very Damaged Anvil'
},
'trapped_chest': {
'id': 146,
'category': 'Redstone',
0: 'Trapped Chest'
},
'light_weighted_pressure_plate': {
'id': 147,
'category': 'Redstone',
0: 'Weighted Pressure Plate (Light)'
},
'heavy_weighted_pressure_plate': {
'id': 148,
'category': 'Redstone',
0: 'Weighted Pressure Plate (Heavy)'
},
# No item 149?
# No item 150?
'daylight_detector': {
'id': 151,
'category': 'Redstone',
0: 'Daylight Sensor'
},
'redstone_block': {
'id': 152,
'category': 'Redstone',
0: 'Block of Redstone'
},
'quartz_ore': {
'id': 153,
'category': 'Building Blocks',
0: 'Nether Quartz Ore'
},
'hopper': {
'id': 154,
'category': 'Redstone',
0: 'Hopper'
},
'quartz_block': {
'id': 155,
'category': 'Building Blocks',
0: 'Block of Quartz',
1: 'Chiseled Quartz Block',
2: 'Pillar Quartz Block'
},
'quartz_stairs': {
'id': 156,
'category': 'Building Blocks',
0: 'Quartz Stairs'
},
'activator_rail': {
'id': 157,
'category': 'Transportation',
0: 'Activator Rail'
},
'dropper': {
'id': 158,
'category': 'Redstone',
0: 'Dropper'
},
'stained_hardened_clay': {
'id': 159,
'category': 'Building Blocks',
0: 'White Hardened Clay',
1: 'Orange Hardened Clay',
2: 'Magenta Hardened Clay',
3: 'Light Blue Hardened Clay',
4: 'Yellow Hardened Clay',
5: 'Lime Hardened Clay',
6: 'Pink Hardened Clay',
7: 'Gray Hardened Clay',
8: 'Light Gray Hardened Clay',
9: 'Cyan Hardened Clay',
10: 'Purple Hardened Clay',
11: 'Blue Hardened Clay',
12: 'Brown Hardened Clay',
13: 'Green Hardened Clay',
14: 'Red Hardened Clay',
15: 'Black Hardened Clay'
},
'stained_glass_pane': {
'id': 160,
'category': 'Decoration Blocks',
0: 'White Stained Glass Pane',
1: 'Orange Stained Glass Pane',
2: 'Magenta Stained Glass Pane',
3: 'Light Blue Stained Glass Pane',
4: 'Yellow Stained Glass Pane',
5: 'Lime Stained Glass Pane',
6: 'Pink Stained Glass Pane',
7: 'Gray Stained Glass Pane',
8: 'Light Gray Stained Glass Pane',
9: 'Cyan Stained Glass Pane',
10: 'Purple Stained Glass Pane',
11: 'Blue Stained Glass Pane',
12: 'Brown Stained Glass Pane',
13: 'Green Stained Glass Pane',
14: 'Red Stained Glass Pane',
15: 'Black Stained Glass Pane'
},
'leaves2': {
'id': 161,
'category': 'Decoration Blocks',
0: 'Acacia Leaves',
1: 'Dark Oak Leaves'
},
'log2': {
'id': 162,
'category': 'Building Blocks',
0: 'Acacia Wood',
1: 'Dark Oak Wood'
},
'acacia_stairs': {
'id': 163,
'category': 'Building Blocks',
0: 'Acacia Wood Stairs'
},
'dark_oak_stairs': {
'id': 164,
'category': 'Building Blocks',
0: 'Dark Oak Wood Stairs'
},
'slime': {
'id': 165,
'category': 'Decoration Blocks',
0: 'Slime Block'
},
'iron_trapdoor': {
'id': 167,
'category': 'Redstone',
0: 'Iron Trapdoor'
},
'prismarine': {
'id': 168,
'category': 'Building Blocks',
0: 'Prismarine',
1: 'Prismarine Bricks',
2: 'Dark Prismarine'
},
'sea_lantern': {
'id': 169,
'category': 'Building Blocks',
0: 'Sea Lantern'
},
'hay_block': {
'id': 170,
'category': 'Building Blocks',
0: 'Hay Bale'
},
'carpet': {
'id': 171,
'category': 'Decoration Blocks',
0: 'Carpet',
1: 'Orange Carpet',
2: 'Magenta Carpet',
3: 'Light Blue Carpet',
4: 'Yellow Carpet',
5: 'Lime Carpet',
6: 'Pink Carpet',
7: 'Gray Carpet',
8: 'Light Gray Carpet',
9: 'Cyan Carpet',
10: 'Purple Carpet',
11: 'Blue Carpet',
12: 'Brown Carpet',
13: 'Green Carpet',
14: 'Red Carpet',
15: 'Black Carpet'
},
'hardened_clay': {
'id': 172,
'category': 'Building Blocks',
0: 'Hardened Clay'
},
'coal_block': {
'id': 173,
'category': 'Building Blocks',
0: 'Block of Coal'
},
'packed_ice': {
'id': 174,
'category': 'Building Blocks',
0: 'Packed Ice'
},
'double_plant': {
'id': 175,
'category': 'Decoration Blocks',
0: 'Sunflower',
1: 'Lilac',
2: 'Double Tallgrass',
3: 'Large Fern',
4: 'Rose Bush',
5: 'Peony'
},
# No item 176?
# No item 177?
# No item 178?
'red_sandstone': {
'id': 179,
'category': 'Building Blocks',
0: 'Red Sandstone',
1: 'Chiseled Red Sandstone',
2: 'Smooth Red Sandstone'
},
'red_sandstone_stairs': {
'id': 180,
'category': 'Building Blocks',
0: 'Red Sandstone Stairs'
},
# No item 181?
'stone_slab2': {
'id': 182,
'category': 'Building Blocks',
0: 'Red Sandstone Slab'
# Marked for more DVs.
},
'spruce_fence_gate': {
'id': 183,
'category': 'Redstone',
0: 'Spruce Fence Gate'
},
'birch_fence_gate': {
'id': 184,
'category': 'Redstone',
0: 'Birch Fence Gate'
},
'jungle_fence_gate': {
'id': 185,
'category': 'Redstone',
0: 'Jungle Fence Gate'
},
'dark_oak_fence_gate': {
'id': 186,
'category': 'Redstone',
0: 'Dark Oak Fence Gate'
},
'acacia_fence_gate': {
'id': 187,
'category': 'Redstone',
0: 'Acacia Fence Gate'
},
'spruce_fence': {
'id': 188,
'category': 'Decoration Blocks',
0: 'Spruce Fence'
},
'birch_fence': {
'id': 189,
'category': 'Decoration Blocks',
0: 'Birch Fence'
},
'jungle_fence': {
'id': 190,
'category': 'Decoration Blocks',
0: 'Jungle Fence'
},
'dark_oak_fence': {
'id': 191,
'category': 'Decoration Blocks',
0: 'Dark Oak Fence'
},
'acacia_fence': {
'id': 192,
'category': 'Decoration Blocks',
0: 'Acacia Fence'
},
# No item 193?
# No item 194?
# No item 195?
# No item 196?
# No item 197?
'end_rod': {
'id': 198,
'category': 'Decoration Blocks',
0: 'End Rod'
},
'chorus_plant': {
'id': 199,
'category': 'Decoration Blocks',
0: 'Chorus Plant'
},
'chorus_flower': {
'id': 200,
'category': 'Decoration Blocks',
0: 'Chorus Flower'
},
'purpur_block': {
'id': 201,
'category': 'Building Blocks',
0: 'Purpur Block'
},
'purpur_pillar': {
'id': 202,
'category': 'Building Blocks',
0: 'Purpur Pillar'
},
'purpur_stairs': {
'id': 203,
'category': 'Building Blocks',
0: 'Purpur Stairs'
},
# No item 204?
'purpur_slab': {
'id': 205,
'category': 'Building Blocks',
0: 'Purpur Slab'
# Marked for more DVs.
},
'end_bricks': {
'id': 206,
'category': 'Building Blocks',
0: 'End Stone Bricks'
},
# No item 207?
# No item 208?
# No item 209?
# No item 210?
# No item 211?
# No item 212?
'magma': {
'id': 213,
'category': 'Building Blocks',
0: 'Magma Block'
},
'nether_wart_block': {
'id': 214,
'category': 'Building Blocks',
0: 'Nether Wart Block'
},
'red_nether_brick': {
'id': 215,
'category': 'Building Blocks',
0: 'Red Nether Brick'
},
'bone_block': {
'id': 216,
'category': 'Building Blocks',
0: 'Bone Block'
},
# No item...
# ...
# Start of 256 block.
'iron_shovel': {
'id': 256,
'category': 'Tools',
'name': 'Iron Shovel',
'uses': 251
},
'iron_pickaxe': {
'id': 257,
'category': 'Tools',
'name': 'Iron Pickaxe',
'uses': 251
},
'iron_axe': {
'id': 258,
'category': 'Tools',
'name': 'Iron Axe',
'uses': 251
},
'flint_and_steel': {
'id': 259,
'category': 'Tools',
'name': 'Flint and Steel',
'uses': 65
},
'apple': {
'id': 260,
'category': 'Foodstuffs',
0: 'Apple'
},
'bow': {
'id': 261,
'category': 'Combat',
'name': 'Bow',
'uses': 385
},
'arrow': {
'id': 262,
'category': 'Combat',
0: 'Arrow'
},
'coal': {
'id': 263,
'category': 'Materials',
0: 'Coal',
1: 'Charcoal'
},
'diamond': {
'id': 264,
'category': 'Materials',
0: 'Diamond'
},
'iron_ingot': {
'id': 265,
'category': 'Materials',
0: 'Iron Ingot'
},
'gold_ingot': {
'id': 266,
'category': 'Materials',
0: 'Gold Ingot'
},
'iron_sword': {
'id': 267,
'category': 'Combat',
'name': 'Iron Sword',
'uses': 251
},
'wooden_sword': {
'id': 268,
'category': 'Combat',
'name': 'Wooden Sword',
'uses': 60
},
'wooden_shovel': {
'id': 269,
'category': 'Tools',
'name': 'Wooden Shovel',
'uses': 60
},
'wooden_pickaxe': {
'id': 270,
'category': 'Tools',
'name': 'Wooden Pickaxe',
'uses': 60
},
'wooden_axe': {
'id': 271,
'category': 'Tools',
'name': 'Wooden Axe',
'uses': 60
},
'stone_sword': {
'id': 272,
'category': 'Combat',
'name': 'Stone Sword',
'uses': 132
},
'stone_shovel': {
'id': 273,
'category': 'Tools',
'name': 'Stone Shovel',
'uses': 132
},
'stone_pickaxe': {
'id': 274,
'category': 'Tools',
'name': 'Stone Pickaxe',
'uses': 132
},
'stone_axe': {
'id': 275,
'category': 'Tools',
'name': 'Stone Axe',
'uses': 132
},
'diamond_sword': {
'id': 276,
'category': 'Combat',
'name': 'Diamond Sword',
'uses': 1562
},
'diamond_shovel': {
'id': 277,
'category': 'Tools',
'name': 'Diamond Shovel',
'uses': 1562
},
'diamond_pickaxe': {
'id': 278,
'category': 'Tools',
'name': 'Diamond Pickaxe',
'uses': 1562
},
'diamond_axe': {
'id': 279,
'category': 'Tools',
'name': 'Diamond Axe',
'uses': 1562
},
'stick': {
'id': 280,
'category': 'Materials',
0: 'Stick'
},
'bowl': {
'id': 281,
'category': 'Materials',
0: 'Bowl'
},
'mushroom_stew': {
'id': 282,
'category': 'Foodstuffs',
0: 'Mushroom Stew'
},
'golden_sword': {
'id': 283,
'category': 'Combat',
'name': 'Golden Sword',
'uses': 33
},
'golden_shovel': {
'id': 284,
'category': 'Tools',
'name': 'Golden Shovel',
'uses': 33
},
'golden_pickaxe': {
'id': 285,
'category': 'Tools',
'name': 'Golden Pickaxe',
'uses': 33
},
'golden_axe': {
'id': 286,
'category': 'Tools',
'name': 'Golden Axe',
'uses': 33
},
'string': {
'id': 287,
'category': 'Materials',
0: 'String'
},
'feather': {
'id': 288,
'category': 'Materials',
0: 'Feather'
},
'gunpowder': {
'id': 289,
'category': 'Materials',
0: 'Gunpowder'
},
'wooden_hoe': {
'id': 290,
'category': 'Tools',
'name': 'Wooden Hoe',
'uses': 60
},
'stone_hoe': {
'id': 291,
'category': 'Tools',
'name': 'Stone Hoe',
'uses': 132
},
'iron_hoe': {
'id': 292,
'category': 'Tools',
'name': 'Iron Hoe',
'uses': 251
},
'diamond_hoe': {
'id': 293,
'category': 'Tools',
'name': 'Diamond Hoe',
'uses': 1562
},
'golden_hoe': {
'id': 294,
'category': 'Tools',
'name': 'Golden Hoe',
'uses': 33
},
'wheat_seeds': {
'id': 295,
'category': 'Materials',
0: 'Seeds'
},
'wheat': {
'id': 296,
'category': 'Materials',
0: 'Wheat'
},
'bread': {
'id': 297,
'category': 'Foodstuffs',
0: 'Bread'
},
'leather_helmet': {
'id': 298,
'category': 'Combat',
'name': 'Leather Cap',
'uses': 56,
'armor': 1,
'toughness': 0
},
'leather_chestplate': {
'id': 299,
'category': 'Combat',
'name': 'Leather Tunic',
'uses': 81,
'armor': 3,
'toughness': 0
},
'leather_leggings': {
'id': 300,
'category': 'Combat',
'name': 'Leather Pants',
'uses': 76,
'armor': 2,
'toughness': 0
},
'leather_boots': {
'id': 301,
'category': 'Combat',
'name': 'Leather Boots',
'uses': 66,
'armor': 1,
'toughness': 0
},
'chainmail_helmet': {
'id': 302,
'category': 'Combat',
'name': 'Chain Helmet',
'uses': 166,
'armor': 2,
'toughness': 0
},
'chainmail_chestplate': {
'id': 303,
'category': 'Combat',
'name': 'Chain Chestplate',
'uses': 241,
'armor': 5,
'toughness': 0
},
'chainmail_leggings': {
'id': 304,
'category': 'Combat',
'name': 'Chain Leggings',
'uses': 226,
'armor': 4,
'toughness': 0
},
'chainmail_boots': {
'id': 305,
'category': 'Combat',
'name': 'Chain Boots',
'uses': 196,
'armor': 1,
'toughness': 0
},
'iron_helmet': {
'id': 306,
'category': 'Combat',
'name': 'Iron Helmet',
'uses': 166,
'armor': 2,
'toughness': 0
},
'iron_chestplate': {
'id': 307,
'category': 'Combat',
'name': 'Iron Chestplate',
'uses': 241,
'armor': 6,
'toughness': 0
},
'iron_leggings': {
'id': 308,
'category': 'Combat',
'name': 'Iron Leggings',
'uses': 226,
'armor': 5,
'toughness': 0
},
'iron_boots': {
'id': 309,
'category': 'Combat',
'name': 'Iron Boots',
'uses': 196,
'armor': 2,
'toughness': 0
},
'diamond_helmet': {
'id': 310,
'category': 'Combat',
'name': 'Diamond Helmet',
'uses': 364,
'armor': 3,
'toughness': 2
},
'diamond_chestplate': {
'id': 311,
'category': 'Combat',
'name': 'Diamond Chestplate',
'uses': 529,
'armor': 8,
'toughness': 2
},
'diamond_leggings': {
'id': 312,
'category': 'Combat',
'name': 'Diamond Leggings',
'uses': 496,
'armor': 6,
'toughness': 2
},
'diamond_boots': {
'id': 313,
'category': 'Combat',
'name': 'Diamond Boots',
'uses': 430,
'armor': 3,
'toughness': 2
},
'golden_helmet': {
'id': 314,
'category': 'Combat',
'name': 'Golden Helmet',
'uses': 78,
'armor': 2,
'toughness': 0
},
'golden_chestplate': {
'id': 315,
'category': 'Combat',
'name': 'Golden Chestplate',
'uses': 113,
'armor': 5,
'toughness': 0
},
'golden_leggings': {
'id': 316,
'category': 'Combat',
'name': 'Golden Leggings',
'uses': 106,
'armor': 3,
'toughness': 0
},
'golden_boots': {
'id': 317,
'category': 'Combat',
'name': 'Golden Boots',
'uses': 92,
'armor': 1,
'toughness': 0
},
'flint': {
'id': 318,
'category': 'Materials',
0: 'Flint'
},
'porkchop': {
'id': 319,
'category': 'Foodstuffs',
0: 'Raw Porkchop'
},
'cooked_porkchop': {
'id': 320,
'category': 'Foodstuffs',
0: 'Cooked Porkchop'
},
'painting': {
'id': 321,
'category': 'Decoration Blocks',
0: 'Painting'
},
'golden_apple': {
'id': 322,
'category': 'Foodstuffs',
0: 'Golden Apple', # Regular.
1: 'Golden Apple' # Notch Apple.
},
'sign': {
'id': 323,
'category': 'Decoration Blocks',
0: 'Sign'
},
'wooden_door': {
'id': 324,
'category': 'Redstone',
0: 'Oak Door'
},
'bucket': {
'id': 325,
'category': 'Miscellaneous',
0: 'Bucket'
},
'water_bucket': {
'id': 326,
'category': 'Miscellaneous',
0: 'Water Bucket'
},
'lava_bucket': {
'id': 327,
'category': 'Miscellaneous',
0: 'Lava Bucket'
},
'minecart': {
'id': 328,
'category': 'Transportation',
0: 'Minecart'
},
'saddle': {
'id': 329,
'category': 'Transportation',
0: 'Saddle'
},
'iron_door': {
'id': 330,
'category': 'Redstone',
0: 'Iron Door'
},
'redstone': {
'id': 331,
'category': 'Redstone',
0: 'Redstone'
},
'snowball': {
'id': 332,
'category': 'Miscellaneous',
0: 'Snowball'
},
'boat': {
'id': 333,
'category': 'Transportation',
0: 'Oak Boat'
},
'leather': {
'id': 334,
'category': 'Materials',
0: 'Leather'
},
'milk_bucket': {
'id': 335,
'category': 'Miscellaneous',
0: 'Milk'
},
'brick': {
'id': 336,
'category': 'Materials',
0: 'Brick'
},
'clay_ball': {
'id': 337,
'category': 'Materials',
0: 'Clay'
},
'reeds': {
'id': 338,
'category': 'Materials',
0: 'Sugar Canes'
},
'paper': {
'id': 339,
'category': 'Miscellaneous',
0: 'Paper'
},
'book': {
'id': 340,
'category': 'Miscellaneous',
0: 'Book'
},
'slime_ball': {
'id': 341,
'category': 'Miscellaneous',
0: 'Slimeball'
},
'chest_minecart': {
'id': 342,
'category': 'Transportation',
0: 'Minecart with Chest'
},
'furnace_minecart': {
'id': 343,
'category': 'Transportation',
0: 'Minecart with Furnace'
},
'egg': {
'id': 344,
'category': 'Materials',
0: 'Egg'
},
'compass': {
'id': 345,
'category': 'Tools',
0: 'Compass'
},
'fishing_rod': {
'id': 346,
'category': 'Tools',
'name': 'Fishing Rod',
'uses': 65
},
'clock': {
'id': 347,
'category': 'Tools',
0: 'Clock'
},
'glowstone_dust': {
'id': 348,
'category': 'Materials',
0: 'Glowstone Dust'
},
'fish': {
'id': 349,
'category': 'Foodstuffs',
0: 'Raw Fish',
1: 'Raw Salmon',
2: 'Clownfish',
3: 'Pufferfish'
},
'cooked_fish': {
'id': 350,
'category': 'Foodstuffs',
0: 'Cooked Fish',
1: 'Cooked Salmon'
},
'dye': {
'id': 351,
'category': 'Materials',
0: 'Ink Sac',
1: 'Rose Red',
2: 'Cactus Green',
3: 'Cocoa Beans',
4: 'Lapis Lazuli',
5: 'Purple Dye',
6: 'Cyan Dye',
7: 'Light Gray Dye',
8: 'Gray Dye',
9: 'Pink Dye',
10: 'Lime Dye',
11: 'Dandelion Yellow',
12: 'Light Blue Dye',
13: 'Magenta Dye',
14: 'Orange Dye',
15: 'Bone Meal'
},
'bone': {
'id': 352,
'category': 'Miscellaneous',
0: 'Bone'
},
'sugar': {
'id': 353,
'category': 'Materials',
0: 'Sugar'
},
'cake': {
'id': 354,
'category': 'Foodstuffs',
0: 'Cake'
},
'bed': {
'id': 355,
'category': 'Decoration Blocks',
0: 'Bed'
},
'repeater': {
'id': 356,
'category': 'Redstone',
0: 'Redstone Repeater'
},
'cookie': {
'id': 357,
'category': 'Foodstuffs',
0: 'Cookie'
},
# No item 358?
'shears': {
'id': 359,
'category': 'Tools',
'name': 'Shears',
'uses': 238
},
'melon': {
'id': 360,
'category': 'Foodstuffs',
0: 'Melon'
},
'pumpkin_seeds': {
'id': 361,
'category': 'Materials',
0: 'Pumpkin Seeds'
},
'melon_seeds': {
'id': 362,
'category': 'Materials',
0: 'Melon Seeds'
},
'beef': {
'id': 363,
'category': 'Foodstuffs',
0: 'Raw Beef'
},
'cooked_beef': {
'id': 364,
'category': 'Foodstuffs',
0: 'Steak'
},
'chicken': {
'id': 365,
'category': 'Foodstuffs',
0: 'Raw Chicken'
},
'cooked_chicken': {
'id': 366,
'category': 'Foodstuffs',
0: 'Cooked Chicken'
},
'rotten_flesh': {
'id': 367,
'category': 'Foodstuffs',
0: 'Rotten Flesh'
},
'ender_pearl': {
'id': 368,
'category': 'Miscellaneous',
0: 'Ender Pearl'
},
'blaze_rod': {
'id': 369,
'category': 'Materials',
0: 'Blaze Rod'
},
'ghast_tear': {
'id': 370,
'category': 'Brewing',
0: 'Ghast Tear'
},
'gold_nugget': {
'id': 371,
'category': 'Materials',
0: 'Gold Nugget'
},
'nether_wart': {
'id': 372,
'category': 'Materials',
0: 'Nether Wart'
},
'potion': {
'id': 373,
'category': 'Brewing',
0: 'Potion' # Potions are stored as NBT data.
},
'glass_bottle': {
'id': 374,
'category': 'Brewing',
0: 'Glass Bottle'
},
'spider_eye': {
'id': 375,
'category': 'Foodstuffs',
0: 'Spider Eye'
},
'fermented_spider_eye': {
'id': 376,
'category': 'Brewing',
0: 'Fermented Spider Eye'
},
'blaze_powder': {
'id': 377,
'category': 'Brewing',
0: 'Blaze Powder'
},
'magma_cream': {
'id': 378,
'category': 'Brewing',
0: 'Magma Cream'
},
'brewing_stand': {
'id': 379,
'category': 'Brewing',
0: 'Brewing Stand'
},
'cauldron': {
'id': 380,
'category': 'Brewing',
0: 'Cauldron'
},
'ender_eye': {
'id': 381,
'category': 'Miscellaneous',
0: 'Eye of Ender'
},
'speckled_melon': {
'id': 382,
'category': 'Brewing',
0: 'Glistering Melon'
},
'spawn_egg': {
'id': 383,
'category': 'Miscellaneous',
0: 'Spawn Egg' # Entity data is stored as NBT data.
},
'experience_bottle': {
'id': 384,
'category': 'Miscellaneous',
0: 'Bottle o\' Enchanting'
},
'fire_charge': {
'id': 385,
'category': 'Miscellaneous',
0: 'Fire Charge'
},
'writable_book': {
'id': 386,
'category': 'Miscellaneous',
0: 'Book and Quill'
},
# No item 387?
'emerald': {
'id': 388,
'category': 'Materials',
0: 'Emerald'
},
'item_frame': {
'id': 389,
'category': 'Decoration Blocks',
0: 'Item Frame'
},
'flower_pot': {
'id': 390,
'category': 'Decoration Blocks',
0: 'Flower Pot'
},
'carrot': {
'id': 391,
'category': 'Foodstuffs',
0: 'Carrot'
},
'potato': {
'id': 392,
'category': 'Foodstuffs',
0: 'Potato'
},
'baked_potato': {
'id': 393,
'category': 'Foodstuffs',
0: 'Baked Potato'
},
'poisonous_potato': {
'id': 394,
'category': 'Foodstuffs',
0: 'Poisonous Potato'
},
'map': {
'id': 395,
'category': 'Miscellaneous',
0: 'Empty Map'
},
'golden_carrot': {
'id': 396,
'category': 'Brewing',
0: 'Golden Carrot'
},
'skull': {
'id': 397,
'category': 'Decoration Blocks',
0: 'Skeleton Skull',
1: 'Wither Skeleton Skull',
2: 'Zombie Head',
3: 'Head',
4: 'Creeper Head',
5: 'Dragon Head'
},
'carrot_on_a_stick': {
'id': 398,
'category': 'Transportation',
'name': 'Carrot on a Stick',
'uses': 26
},
'nether_star': {
'id': 399,
'category': 'Materials',
0: 'Nether Star'
},
'pumpkin_pie': {
'id': 400,
'category': 'Foodstuffs',
0: 'Pumpkin Pie'
},
# No item 401?
'firework_charge': {
'id': 402,
'category': 'Miscellaneous',
0: 'Firework Star'
},
'enchanted_book': {
'id': 403,
'category': 'Miscellaneous', # Category changes based on enchant.
0: 'Enchanted Book' # Enchant is stored as NBT data.
},
'comparator': {
'id': 404, # If you make a HTTP joke you will be slapped.
'category': 'Redstone',
0: 'Redstone Comparator'
},
'netherbrick': {
'id': 405,
'category': 'Materials',
0: 'Nether Brick'
},
'quartz': {
'id': 406,
'category': 'Materials',
0: 'Nether Quartz'
},
'tnt_minecart': {
'id': 407,
'category': 'Transportation',
0: 'Minecart with TNT'
},
'hopper_minecart': {
'id': 408,
'category': 'Transportation',
0: 'Minecart with Hopper'
},
'prismarine_shard': {
'id': 409,
'category': 'Materials',
0: 'Prismarine Shard'
},
'prismarine_crystals': {
'id': 410,
'category': 'Materials',
0: 'Prismarine Crystals'
},
'rabbit': {
'id': 411,
'category': 'Foodstuffs',
0: 'Raw Rabbit'
},
'cooked_rabbit': {
'id': 412,
'category': 'Foodstuffs',
0: 'Cooked Rabbit'
},
'rabbit_stew': {
'id': 413,
'category': 'Foodstuffs',
0: 'Rabbit Stew'
},
'rabbit_foot': {
'id': 414,
'category': 'Brewing',
0: 'Rabbit\'s Foot'
},
'rabbit_hide': {
'id': 415,
'category': 'Materials',
0: 'Rabbit Hide'
},
'armor_stand': {
'id': 416,
'category': 'Decoration Blocks',
0: 'Armor Stand'
},
'iron_horse_armor': {
'id': 417,
'category': 'Miscellaneous',
0: 'Iron Horse Armor'
},
'golden_horse_armor': {
'id': 418,
'category': 'Miscellaneous',
0: 'Gold Horse Armor'
},
'diamond_horse_armor': {
'id': 419,
'category': 'Miscellaneous',
0: 'Diamond Horse Armor'
},
'lead': {
'id': 420,
'category': 'Tools',
0: 'Lead'
},
'name_tag': {
'id': 421,
'category': 'Tools',
0: 'Name Tag'
},
# No item 422?
'mutton': {
'id': 423,
'category': 'Foodstuffs',
0: 'Raw Mutton'
},
'cooked_mutton': {
'id': 424,
'category': 'Foodstuffs',
0: 'Cooked Mutton'
},
'banner': {
'id': 425,
'category': 'Decoration Blocks',
0: 'Black Banner', # Colours are in reverse order...?
1: 'Red Banner',
2: 'Green Banner',
3: 'Brown Banner',
4: 'Blue Banner',
5: 'Purple Banner',
6: 'Cyan Banner',
7: 'Light Gray Banner',
8: 'Gray Banner',
9: 'Pink Banner',
10: 'Lime Banner',
11: 'Yellow Banner',
12: 'Light Blue Banner',
13: 'Magenta Banner',
14: 'Orange Banner',
15: 'White Banner'
},
'end_crystal': {
'id': 426,
'category': 'Decoration Blocks',
0: 'End Crystal'
},
'spruce_door': {
'id': 427,
'category': 'Redstone',
0: 'Spruce Door'
},
'birch_door': {
'id': 428,
'category': 'Redstone',
0: 'Birch Door'
},
'jungle_door': {
'id': 429,
'category': 'Redstone',
0: 'Jungle Door'
},
'acacia_door': {
'id': 430,
'category': 'Redstone',
0: 'Acacia Door'
},
'dark_oak_door': {
'id': 431,
'category': 'Redstone',
0: 'Dark Oak Door'
},
'chorus_fruit': {
'id': 432,
'category': 'Materials',
0: 'Chorus Fruit'
},
'chorus_fruit_popped': {
'id': 433,
'category': 'Materials',
0: 'Popped Chorus Fruit'
},
'beetroot': {
'id': 434,
'category': 'Foodstuffs',
0: 'Beetroot'
},
'beetroot_seeds': {
'id': 435,
'category': 'Materials',
0: 'Beetroot Seeds'
},
'beetroot_soup': {
'id': 436,
'category': 'Foodstuffs',
0: 'Beetroot Soup'
},
'dragon_breath': {
'id': 437,
'category': 'Brewing',
0: 'Dragon\'s Breath'
},
'splash_potion': {
'id': 438,
'category': 'Brewing',
0: 'Splash Potion' # Potion is stored as NBT data.
},
'spectral_arrow': {
'id': 439,
'category': 'Combat',
0: 'Spectral Arrow'
},
'tipped_arrow': {
'id': 440,
'category': 'Combat',
0: 'Tipped Arrow' # Arrow type is stored as NBT data.
},
'lingering_potion': {
'id': 441,
'category': 'Brewing',
0: 'Lingering Potion' # Potion is stored as NBT data.
},
'shield': {
'id': 442,
'category': 'Combat',
'name': 'Shield',
'uses': 337
},
'elytra': {
'id': 443,
'category': 'Transportation',
'name': 'Elytra',
'uses': 431
},
'spruce_boat': {
'id': 444,
'category': 'Transportation',
0: 'Spruce Boat'
},
'birch_boat': {
'id': 445,
'category': 'Transportation',
0: 'Birch Boat'
},
'jungle_boat': {
'id': 446,
'category': 'Transportation',
0: 'Jungle Boat'
},
'acacia_boat': {
'id': 447,
'category': 'Transportation',
0: 'Acacia Boat'
},
'dark_oak_boat': {
'id': 448,
'category': 'Transportation',
0: 'Dark Oak Boat'
},
# Missing item...
# ...
# Start of 2256 block.
'record_13': {
'id': 2256,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_cat': {
'id': 2257,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_blocks': {
'id': 2258,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_chirp': {
'id': 2259,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_far': {
'id': 2260,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_mall': {
'id': 2261,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_mellohi': {
'id': 2262,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_stal': {
'id': 2263,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_strad': {
'id': 2264,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_ward': {
'id': 2265,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_11': {
'id': 2266,
'category': 'Miscellaneous',
0: 'Music Disc'
},
'record_wait': {
'id': 2267,
'category': 'Miscellaneous',
0: 'Music Disc'
}
}
}
enchantments = {
'minecraft': {
'__VERSION__': 1.10,
'__LANGUAGE__': 'en_US',
# Begin Armour Block.
'protection': {
'id': 0,
'name': 'Protection'
},
'fire_protection': {
'id': 1,
'name': 'Fire Protection'
},
'feather_falling': {
'id': 2,
'name': 'Feather Falling'
},
'blast_protection': {
'id': 3,
'name': 'Blast Protection'
},
'projectile_protection': {
'id': 4,
'name': 'Projectile Protection'
},
'respiration': {
'id': 5,
'name': 'Respiration'
},
'aqua_affinity': {
'id': 6,
'name': 'Aqua Affinity'
},
'thorns': {
'id': 7,
'name': 'Thorns'
},
'depth_strider': {
'id': 8,
'name': 'Depth Strider'
},
'frost_walker': {
'id': 9,
'name': 'Frost Walker'
},
# End Armour Block.
# Begin Sword Block.
'sharpness': {
'id': 16,
'name': 'Sharpness'
},
'smite': {
'id': 17,
'name': 'Smite'
},
'bane_of_arthropods': {
'id': 18,
'name': 'Bane of Arthropods'
},
'knockback': {
'id': 19,
'name': 'Knockback'
},
'fire_aspect': {
'id': 20,
'name': 'Fire Aspect'
},
'looting': {
'id': 21,
'name': 'Looting'
},
# End Sword Block.
# Begin Tools Block.
'efficiency': {
'id': 32,
'name': 'Efficiency'
},
'silk_touch': {
'id': 33,
'name': 'Silk Touch'
},
'unbreaking': {
'id': 34,
'name': 'Unbreaking'
},
'fortune': {
'id': 35,
'name': 'Fortune'
},
# End Tools Block.
# Begin Bows Block.
'power': {
'id': 48,
'name': 'Power'
},
'punch': {
'id': 49,
'name': 'Punch'
},
'flame': {
'id': 50,
'name': 'Flame'
},
'infinity': {
'id': 51,
'name': 'Infinity'
},
# End Bows Block.
# Begin Fishing Rods Block.
'luck_of_the_sea': {
'id': 61,
'name': 'Luck of the Sea'
},
'lure': {
'id': 62,
'name': 'Lure'
},
# End Fishing Rods Block.
# Begin Misc Block.
'mending': {
'id': 70,
'name': 'Mending'
}
# End Misc Block.
}
}
# Roman Numeral Conversion
# Inspired by: https://stackoverflow.com/a/28777781
romanNumerals = (
(1000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I')
)
def intToRoman(number):
romanString = ''
for romanTuple in romanNumerals:
div, number = divmod(number, romanTuple[0])
romanString += romanTuple[1] * div
return romanString
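# Illustrative conversions (a sketch only: actual call sites depend on the
# consumer of this module, but the greedy divmod pass over romanNumerals is
# deterministic):
#   intToRoman(4)    -> 'IV'
#   intToRoman(9)    -> 'IX'
#   intToRoman(1987) -> 'MCMLXXXVII'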
def lookupItem(item, damage=0):
mod, item = item.split(':')
result = [None, None, None, None]
if mod in items and item in items[mod]:
if damage in items[mod][item]:
result[0] = items[mod][item][damage]
elif 'name' in items[mod][item]:
result[0] = items[mod][item]['name']
else:
result[0] = '[Unknown Name]'
if 'uses' in items[mod][item]:
result[1] = '{:.1%}'.format((items[mod][item]['uses'] - damage) / float(items[mod][item]['uses']))
if 'armor' in items[mod][item]:
result[2] = items[mod][item]['armor']
if 'toughness' in items[mod][item]:
result[3] = items[mod][item]['toughness']
else:
result[0] = '[Item Not Found]'
return result
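# Illustrative lookupItem calls, assuming the item tables above are registered
# under the 'minecraft' mod key (as the enchantments table below is). The
# returned list is [display name, remaining durability %, armor, toughness]:
#   lookupItem('minecraft:diamond_sword')       -> ['Diamond Sword', '100.0%', None, None]
#   lookupItem('minecraft:diamond_sword', 781)  -> ['Diamond Sword', '50.0%', None, None]
#   lookupItem('minecraft:leather_helmet')      -> ['Leather Cap', '100.0%', 1, 0]
#   lookupItem('minecraft:dye', 4)              -> ['Lapis Lazuli', None, None, None]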
def lookupNumericItem(itemNumeric, damage=0):
print('WARNING: Item numeric IDs are deprecated. Please use text IDs.')
result = [None, None, None, None]
for mod in items.values():
for item in mod.values():
if type(item) is dict and item['id'] == itemNumeric:
if damage in item:
result[0] = item[damage]
elif 'name' in item:
result[0] = item['name']
else:
result[0] = '[Unknown Name]'
if 'uses' in item:
result[1] = '{:.1%}'.format((item['uses'] - damage) / float(item['uses']))
if 'armor' in item:
result[2] = item['armor']
if 'toughness' in item:
result[3] = item['toughness']
break
if not result[0]:
result[0] = '[Item Not Found]'
return result
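# The deprecated numeric path resolves the same records by scanning every mod
# table, so (illustratively) lookupNumericItem(276) returns
# ['Diamond Sword', '100.0%', None, None] after printing the deprecation warning.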
def lookupEnchant(enchant, level=None):
mod, enchant = enchant.split(':')
result = [None, None]
if mod in enchantments and enchant in enchantments[mod]:
if 'name' in enchantments[mod][enchant]:
result[0] = enchantments[mod][enchant]['name']
else:
result[0] = '[Unknown Name]'
else:
result[0] = '[Enchantment Not Found]'
if level:
result[1] = intToRoman(level)
return result
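# Illustrative lookupEnchant calls; the result is [display name, Roman numeral level]:
#   lookupEnchant('minecraft:sharpness', 5) -> ['Sharpness', 'V']
#   lookupEnchant('minecraft:mending')      -> ['Mending', None]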
def lookupNumericEnchant(enchantNumeric, level=None):
result = [None, None]
for mod in enchantments.values():
for enchant in mod.values():
if type(enchant) is dict and enchant['id'] == enchantNumeric:
if 'name' in enchant:
result[0] = enchant['name']
else:
result[0] = '[Unknown Name]'
break
if not result[0]:
result[0] = '[Enchantment Not Found]'
if level:
result[1] = intToRoman(level)
return result
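# Minimal, optional smoke test. It is only a sketch: it runs when this module is
# executed directly, exercising both the text and the deprecated numeric lookup
# paths, and does not affect importers of the tables above.
if __name__ == '__main__':
    print(lookupItem('minecraft:iron_pickaxe', 125))
    print(lookupItem('minecraft:diamond_chestplate'))
    print(lookupEnchant('minecraft:unbreaking', 3))
    print(lookupNumericEnchant(35, 2))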
| gpl-3.0 |
giorgiop/scikit-learn | sklearn/calibration.py | 17 | 19402 | """Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Balazs Kegl <balazs.kegl@gmail.com>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .utils.fixes import signature
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .model_selection import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
for prediction. In case that cv="prefit" is passed to __init__,
it is assumed that base_estimator has been fitted already and all
data is used for calibration. Note that data for fitting the
classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' or 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples ``(<<1000)`` since it tends to overfit.
Use sigmoids (Platt's calibration) in this case.
cv : integer, cross-validation generator, iterable or "prefit", optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. If ``y``
is neither binary nor multiclass, :class:`sklearn.model_selection.KFold`
is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
The list of calibrated classifiers, one for each crossvalidation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
lb = LabelBinarizer().fit(y)
self.classes_ = lb.classes_
# Check that each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, y, classifier=True)
fit_parameters = signature(base_estimator.fit).parameters
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in fit_parameters):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
base_estimator_sample_weight = sample_weight
for train, test in cv.split(X, y):
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
# classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach based on isotonic regression.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid'):
self.base_estimator = base_estimator
self.method = method
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = np.arange(df.shape[1])
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
lb = LabelBinarizer()
Y = lb.fit_transform(y)
self.classes_ = lb.classes_
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
Whether y_prob needs to be normalized into the bin [0, 1], i.e. is not
a proper probability. If True, the smallest value in y_prob is mapped
onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
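# Hedged usage sketch (not part of the library proper): how the two public entry
# points above are typically combined. The synthetic dataset and the choice of
# GaussianNB are illustrative assumptions; the block only runs when this file is
# executed directly.
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split
    from sklearn.naive_bayes import GaussianNB
    X, y = make_classification(n_samples=2000, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    # Calibrate a naive Bayes classifier with Platt scaling (sigmoid) on 3 folds.
    calibrated = CalibratedClassifierCV(GaussianNB(), method='sigmoid', cv=3)
    calibrated.fit(X_train, y_train)
    prob_pos = calibrated.predict_proba(X_test)[:, 1]
    # Reliability-diagram data: fraction of positives vs. mean predicted probability.
    prob_true, prob_pred = calibration_curve(y_test, prob_pos, n_bins=10)
    print(list(zip(prob_pred, prob_true)))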
| bsd-3-clause |
PatrickOReilly/scikit-learn | sklearn/utils/tests/test_seq_dataset.py | 45 | 2486 | # Author: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.seq_dataset import ArrayDataset, CSRDataset
from sklearn.datasets import load_iris
from numpy.testing import assert_array_equal
from nose.tools import assert_equal
iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
X_csr = sp.csr_matrix(X)
sample_weight = np.arange(y.size, dtype=np.float64)
def assert_csr_equal(X, Y):
X.eliminate_zeros()
Y.eliminate_zeros()
assert_equal(X.shape[0], Y.shape[0])
assert_equal(X.shape[1], Y.shape[1])
assert_array_equal(X.data, Y.data)
assert_array_equal(X.indices, Y.indices)
assert_array_equal(X.indptr, Y.indptr)
def test_seq_dataset():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
for dataset in (dataset1, dataset2):
for i in range(5):
# next sample
xi_, yi, swi, idx = dataset._next_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
# random sample
xi_, yi, swi, idx = dataset._random_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
def test_seq_dataset_shuffle():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
# not shuffled
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, i)
assert_equal(idx2, i)
for i in range(5):
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
seed = 77
dataset1._shuffle_py(seed)
dataset2._shuffle_py(seed)
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, idx2)
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
| bsd-3-clause |
PatrickOReilly/scikit-learn | examples/gaussian_process/plot_gpr_co2.py | 126 | 5705 | """
========================================================
Gaussian process regression (GPR) on Mauna Loa CO2 data.
========================================================
This example is based on Section 5.4.3 of "Gaussian Processes for Machine
Learning" [RW2006]. It illustrates an example of complex kernel engineering and
hyperparameter optimization using gradient ascent on the
log-marginal-likelihood. The data consists of the monthly average atmospheric
CO2 concentrations (in parts per million by volume (ppmv)) collected at the
Mauna Loa Observatory in Hawaii, between 1958 and 1997. The objective is to
model the CO2 concentration as a function of the time t.
The kernel is composed of several terms that are responsible for explaining
different properties of the signal:
- a long term, smooth rising trend is to be explained by an RBF kernel. The
RBF kernel with a large length-scale enforces this component to be smooth;
it is not enforced that the trend is rising which leaves this choice to the
GP. The specific length-scale and the amplitude are free hyperparameters.
- a seasonal component, which is to be explained by the periodic
ExpSineSquared kernel with a fixed periodicity of 1 year. The length-scale
of this periodic component, controlling its smoothness, is a free parameter.
In order to allow decaying away from exact periodicity, the product with an
RBF kernel is taken. The length-scale of this RBF component controls the
decay time and is a further free parameter.
- smaller, medium term irregularities are to be explained by a
RationalQuadratic kernel component, whose length-scale and alpha parameter,
which determines the diffuseness of the length-scales, are to be determined.
According to [RW2006], these irregularities can better be explained by
a RationalQuadratic than an RBF kernel component, probably because it can
accommodate several length-scales.
- a "noise" term, consisting of an RBF kernel contribution, which shall
explain the correlated noise components such as local weather phenomena,
and a WhiteKernel contribution for the white noise. The relative amplitudes
and the RBF's length scale are further free parameters.
Maximizing the log-marginal-likelihood after subtracting the target's mean
yields the following kernel with an LML of -83.214::
34.4**2 * RBF(length_scale=41.8)
+ 3.27**2 * RBF(length_scale=180) * ExpSineSquared(length_scale=1.44,
periodicity=1)
+ 0.446**2 * RationalQuadratic(alpha=17.7, length_scale=0.957)
+ 0.197**2 * RBF(length_scale=0.138) + WhiteKernel(noise_level=0.0336)
Thus, most of the target signal (34.4ppm) is explained by a long-term rising
trend (length-scale 41.8 years). The periodic component has an amplitude of
3.27ppm, a decay time of 180 years and a length-scale of 1.44. The long decay
time indicates that we have a locally very close to periodic seasonal
component. The correlated noise has an amplitude of 0.197ppm with a length
scale of 0.138 years and a white-noise contribution of 0.197ppm. Thus, the
overall noise level is very small, indicating that the data can be very well
explained by the model. The figure shows also that the model makes very
confident predictions until around 2015.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, WhiteKernel, RationalQuadratic, ExpSineSquared
from sklearn.datasets import fetch_mldata
data = fetch_mldata('mauna-loa-atmospheric-co2').data
X = data[:, [1]]
y = data[:, 0]
# Kernel with parameters given in GPML book
k1 = 66.0**2 * RBF(length_scale=67.0) # long term smooth rising trend
k2 = 2.4**2 * RBF(length_scale=90.0) \
* ExpSineSquared(length_scale=1.3, periodicity=1.0) # seasonal component
# medium term irregularity
k3 = 0.66**2 \
* RationalQuadratic(length_scale=1.2, alpha=0.78)
k4 = 0.18**2 * RBF(length_scale=0.134) \
+ WhiteKernel(noise_level=0.19**2) # noise terms
kernel_gpml = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel_gpml, alpha=0,
optimizer=None, normalize_y=True)
gp.fit(X, y)
print("GPML kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
# Kernel with optimized parameters
k1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend
k2 = 2.0**2 * RBF(length_scale=100.0) \
* ExpSineSquared(length_scale=1.0, periodicity=1.0,
periodicity_bounds="fixed") # seasonal component
# medium term irregularities
k3 = 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0)
k4 = 0.1**2 * RBF(length_scale=0.1) \
+ WhiteKernel(noise_level=0.1**2,
noise_level_bounds=(1e-3, np.inf)) # noise terms
kernel = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel, alpha=0,
normalize_y=True)
gp.fit(X, y)
print("\nLearned kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
X_ = np.linspace(X.min(), X.max() + 30, 1000)[:, np.newaxis]
y_pred, y_std = gp.predict(X_, return_std=True)
# Illustration
plt.scatter(X, y, c='k')
plt.plot(X_, y_pred)
plt.fill_between(X_[:, 0], y_pred - y_std, y_pred + y_std,
alpha=0.5, color='k')
plt.xlim(X_.min(), X_.max())
plt.xlabel("Year")
plt.ylabel(r"CO$_2$ in ppm")
plt.title(r"Atmospheric CO$_2$ concentration at Mauna Loa")
plt.tight_layout()
plt.show()
| bsd-3-clause |
ray-project/ray | rllib/algorithms/alpha_zero/tests/test_alpha_zero.py | 1 | 1264 | import unittest
import ray
import ray.rllib.algorithms.alpha_zero as az
from ray.rllib.algorithms.alpha_zero.models.custom_torch_models import DenseModel
from ray.rllib.examples.env.cartpole_sparse_rewards import CartPoleSparseRewards
from ray.rllib.utils.test_utils import (
check_train_results,
framework_iterator,
)
class TestAlphaZero(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
ray.init()
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_alpha_zero_compilation(self):
"""Test whether AlphaZero can be built with all frameworks."""
config = (
az.AlphaZeroConfig()
.environment(env=CartPoleSparseRewards)
.training(model={"custom_model": DenseModel})
)
num_iterations = 1
# Only working for torch right now.
for _ in framework_iterator(config, frameworks="torch"):
algo = config.build()
for i in range(num_iterations):
results = algo.train()
check_train_results(results)
print(results)
algo.stop()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| apache-2.0 |
GoogleCloudPlatform/cloudml-samples | tensorflow/standard/boston/trainer/utils.py | 1 | 3382 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import subprocess
import numpy as np
# Current working directory.
WORKING_DIR = os.getcwd()
# Temporary directory inside
TEMP_DIR = 'tmp/'
# Download file
BOSTON_FILE = 'boston_housing.npz'
def download_files_from_gcs(source, destination):
"""Download files from GCS to a WORKING_DIR/.
Args:
source: GCS path to the training data
destination: GCS path to the validation data.
Returns:
A list to the local data paths where the data is downloaded.
"""
local_file_names = [destination]
gcs_input_paths = [source]
# Copy raw files from GCS into local path.
raw_local_files_data_paths = [os.path.join(WORKING_DIR, local_file_name)
for local_file_name in local_file_names
]
for i, gcs_input_path in enumerate(gcs_input_paths):
if gcs_input_path:
subprocess.check_call(
['gsutil', 'cp', gcs_input_path, raw_local_files_data_paths[i]])
return raw_local_files_data_paths
def load_data(path='boston_housing.npz', test_split=0.2, seed=113):
"""Loads the Boston Housing dataset.
Args:
path: path where to cache the dataset locally (relative to
~/.keras/datasets).
test_split: fraction of the data to reserve as test set.
seed: Random seed for shuffling the data before computing the test split.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
Raises:
ValueError: No dataset file defined.
"""
assert 0 <= test_split < 1
if not path:
raise ValueError('No dataset file defined')
if path.startswith('gs://'):
download_files_from_gcs(path, destination=BOSTON_FILE)
path = BOSTON_FILE
with np.load(path) as f:
x = f['x']
y = f['y']
np.random.seed(seed)
indices = np.arange(len(x))
np.random.shuffle(indices)
x = x[indices]
y = y[indices]
x_train = np.array(x[:int(len(x) * (1 - test_split))])
y_train = np.array(y[:int(len(x) * (1 - test_split))])
x_test = np.array(x[int(len(x) * (1 - test_split)):])
y_test = np.array(y[int(len(x) * (1 - test_split)):])
return (x_train, y_train), (x_test, y_test)
def normalize_data(train_data, test_data):
"""Normalize features with different scales and ranges.
Subtract the mean of the feature and divide by the standard deviation.
Test data is *not* used when calculating the mean and std.
Args:
train_data: (numpy.darray) Training data.
test_data: (numpy.darray) Testing data.
Returns:
A tuple of training and test data.
"""
mean = train_data.mean(axis=0)
std = train_data.std(axis=0)
train_data = (train_data - mean) / std
test_data = (test_data - mean) / std
return train_data, test_data
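# Hedged usage sketch (illustrative only, not part of the trainer): load the
# Boston data from a local boston_housing.npz and normalize it before training.
if __name__ == '__main__':
    (train_x, train_y), (test_x, test_y) = load_data(path=BOSTON_FILE)
    train_x, test_x = normalize_data(train_x, test_x)
    print('train shape:', train_x.shape, 'test shape:', test_x.shape)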
| apache-2.0 |
xlqian/navitia | source/tyr/tyr/tasks.py | 1 | 28408 | # Copyright (c) 2001-2022, Hove and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Hove (www.hove.com).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
import glob
import logging
import os
import shutil
import re
import zipfile
from celery import chain
from celery.signals import task_postrun
from flask import current_app
import kombu
from tyr.binarisation import (
gtfs2ed,
osm2ed,
ed2nav,
fusio2ed,
geopal2ed,
fare2ed,
poi2ed,
synonym2ed,
shape2ed,
load_bounding_shape,
bano2mimir,
openaddresses2mimir,
osm2mimir,
stops2mimir,
ntfs2mimir,
cosmogony2mimir,
poi2mimir,
fusio2s3,
gtfs2s3,
zip_if_needed,
)
from tyr.binarisation import reload_data, move_to_backupdirectory
from tyr import celery
from navitiacommon import models, task_pb2, utils
from tyr.helper import load_instance_config, get_instance_logger, is_activate_autocomplete_version
from navitiacommon.launch_exec import launch_exec
from datetime import datetime, timedelta
@celery.task()
def finish_job(job_id):
"""
use for mark a job as done after all the required task has been executed
"""
job = models.Job.query.get(job_id)
if job.state != 'failed':
job.state = 'done'
models.db.session.commit()
def import_data(
files,
instance,
backup_file,
asynchronous=True,
reload=True,
custom_output_dir=None,
skip_mimir=False,
skip_2ed=False,
):
"""
import the data contains in the list of 'files' in the 'instance'
:param files: files to import
:param instance: instance to receive the data
:param backup_file: If True the files are moved to a backup directory, else they are not moved
:param asynchronous: If True all jobs are run in background, else the jobs are run in sequence the function
will only return when all of them are finish
:param reload: If True kraken would be reload at the end of the treatment
:param custom_output_dir: subdirectory for the nav file created. If not given, the instance default one is taken
:param skip_mimir: skip importing data into mimir
:param skip_2ed: skip inserting last_load_dataset files into ed database
run the whole data import process:
- data import in bdd (fusio2ed, gtfs2ed, poi2ed, ...)
- export bdd to nav file
- update the jormungandr db with the new data for the instance
- reload the krakens
"""
actions = []
job = models.Job()
instance_config = load_instance_config(instance.name)
job.instance = instance
job.state = 'running'
task = {
'gtfs': gtfs2ed,
'fusio': fusio2ed,
'osm': osm2ed,
'geopal': geopal2ed,
'fare': fare2ed,
'poi': poi2ed,
'synonym': synonym2ed,
'shape': shape2ed,
}
def process_ed2nav():
models.db.session.add(job)
models.db.session.commit()
# We pass the job id to each tasks, but job need to be commited for having an id
for action in actions:
action.kwargs['job_id'] = job.id
# Create binary file (New .nav.lz4)
binarisation = [ed2nav.si(instance_config, job.id, custom_output_dir)]
actions.append(chain(*binarisation))
# Reload kraken with new data after binarisation (New .nav.lz4)
if reload:
actions.append(reload_data.si(instance_config, job.id))
if not skip_mimir:
for dataset in job.data_sets:
actions.extend(send_to_mimir(instance, dataset.name, dataset.family_type))
else:
current_app.logger.info("skipping mimir import")
actions.append(finish_job.si(job.id))
# We should delete old backup directories related to this instance
actions.append(purge_instance.si(instance.id, current_app.config['DATASET_MAX_BACKUPS_TO_KEEP']))
if asynchronous:
return chain(*actions).delay()
else:
# all job are run in sequence and import_data will only return when all the jobs are finish
return chain(*actions).apply()
if skip_2ed:
# For skip_2ed, skip inserting last_load_dataset files into ed database
return process_ed2nav()
for _file in files:
filename = None
dataset = models.DataSet()
# NOTE: for the moment we do not use the path to load the data here
# but we'll need to refactor this to take it into account
try:
dataset.type, _ = utils.type_of_data(_file)
dataset.family_type = utils.family_of_data(dataset.type)
except Exception:
if backup_file:
move_to_backupdirectory(_file, instance_config.backup_directory)
current_app.logger.debug(
"Corrupted source file : {} moved to {}".format(_file, instance_config.backup_directory)
)
continue
if dataset.type in task:
if backup_file:
filename = move_to_backupdirectory(_file, instance_config.backup_directory, manage_sp_char=True)
else:
filename = _file
has_pt_planner_loki = (
hasattr(instance, 'pt_planners_configurations') and "loki" in instance.pt_planners_configurations
)
if has_pt_planner_loki:
loki_data_source = instance.pt_planners_configurations.get('loki', {}).get('data_source')
if loki_data_source is not None:
if loki_data_source == "minio":
if dataset.type == "fusio":
actions.append(fusio2s3.si(instance_config, filename, dataset_uid=dataset.uid))
if dataset.type == "gtfs":
actions.append(gtfs2s3.si(instance_config, filename, dataset_uid=dataset.uid))
elif loki_data_source == "local" and dataset.type in ["fusio", "gtfs"]:
zip_file = zip_if_needed(filename)
dest = os.path.join(os.path.dirname(instance_config.target_file), "ntfs.zip")
shutil.copy(zip_file, dest)
else:
current_app.logger.debug(
"unknown loki data_source '{}' for coverage '{}'".format(
loki_data_source, instance.name
)
)
actions.append(task[dataset.type].si(instance_config, filename, dataset_uid=dataset.uid))
else:
# unknown type, we skip it
current_app.logger.debug("unknown file type: {} for file {}".format(dataset.type, _file))
continue
# currently the name of a dataset is the path to it
dataset.name = filename
dataset.state = "pending"
models.db.session.add(dataset)
job.data_sets.append(dataset)
if actions:
return process_ed2nav()
def send_to_mimir(instance, filename, family_type):
"""
:param instance: instance to receive the data
:param filename: file to inject towards mimir
:param family_type: dataset's family type
- create a job with a data_set
- data injection towards mimir(stops2mimir, ntfs2mimir, poi2mimir)
returns action list
"""
# if mimir isn't setup do not try to import data for the autocompletion
if not any([is_activate_autocomplete_version(2) or is_activate_autocomplete_version(7)]):
return []
# Bail out if the family type is not one that mimir deals with.
if family_type not in ['pt', 'poi']:
return []
# This test is to avoid creating a new job if there is no action on mimir.
if not (instance.import_ntfs_in_mimir or instance.import_stops_in_mimir):
return []
actions = []
job = models.Job()
job.instance = instance
job.state = 'running'
if is_activate_autocomplete_version(7):
dataset_es7 = create_and_get_dataset(ds_type="fusio", family_type="mimir7", filename=filename)
models.db.session.add(dataset_es7)
job.data_sets.append(dataset_es7)
if is_activate_autocomplete_version(2):
dataset_es2 = create_and_get_dataset(ds_type="fusio", family_type="mimir", filename=filename)
models.db.session.add(dataset_es2)
job.data_sets.append(dataset_es2)
models.db.session.add(job)
models.db.session.commit()
for version in (2, 7):
if not is_activate_autocomplete_version(version):
logging.getLogger(__name__).info("Disable import mimir version {}".format(version))
continue
ds = dataset_es7 if version == 7 else dataset_es2
if family_type == 'pt':
# Import ntfs in Mimir
if instance.import_ntfs_in_mimir:
actions.append(ntfs2mimir.si(instance.name, filename, version, job.id, dataset_uid=ds.uid))
# Import stops in Mimir.
# if we are loading pt data we might want to load the stops to autocomplete
# This action is deprecated: https://github.com/hove-io/mimirsbrunn/blob/4430eed1d81247fffa7cf32ba675a9c5ad8b1cbe/documentation/components.md#stops2mimir
if instance.import_stops_in_mimir and not instance.import_ntfs_in_mimir:
actions.append(stops2mimir.si(instance.name, filename, version, job.id, dataset_uid=ds.uid))
else: # assume family_type == 'poi':
actions.append(poi2mimir.si(instance.name, filename, version, job.id, dataset_uid=ds.uid))
actions.append(finish_job.si(job.id))
return actions
@celery.task()
def update_data():
for instance in models.Instance.query_existing().all():
current_app.logger.debug("Update data of : {}".format(instance.name))
instance_config = None
try:
instance_config = load_instance_config(instance.name)
except:
current_app.logger.exception("impossible to load instance configuration for %s", instance.name)
# Do not stop the task if only one instance is missing
continue
files = glob.glob(instance_config.source_directory + "/*")
if files:
import_data(files, instance, backup_file=True)
BANO_REGEXP = re.compile('.*bano.*')
COSMOGONY_REGEXP = re.compile('.*cosmogony.*')
OPEN_ADDRESSES_REGEXP = re.compile('.*csv')
def create_and_get_dataset(ds_type, family_type, filename):
dataset = models.DataSet()
dataset.family_type = family_type
dataset.type = ds_type
# currently the name of a dataset is the path to it
dataset.name = filename
return dataset
def type_of_autocomplete_data(filename):
"""
return the type of autocomplete data of the files
filename can be either a directory, a file or a list of files
return can be:
- 'bano'
- 'osm'
- 'cosmogony'
- 'oa'
"""
def files_type(files):
# first we try fusio, because it can load fares too
if any(f for f in files if BANO_REGEXP.match(f)):
return 'bano'
if len(files) == 1 and COSMOGONY_REGEXP.match(files[0]):
return 'cosmogony'
if len(files) == 1 and files[0].endswith('.pbf'):
return 'osm'
# OpenAddresses files does not have a predefined naming,
# so we check it last, and consider all csv as OA
if any(f for f in files if OPEN_ADDRESSES_REGEXP.match(f)):
return 'oa'
return None
if not isinstance(filename, list):
if filename.endswith('.zip'):
zipf = zipfile.ZipFile(filename)
files = zipf.namelist()
elif os.path.isdir(filename):
files = glob.glob(filename + "/*")
else:
files = [filename]
else:
files = filename
return files_type(files)
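# Illustrative expectations for type_of_autocomplete_data (the file names below
# are made up and only show which rule wins; import_autocomplete relies on these
# labels to sort files into the cosmogony, bano, oa, osm import order):
#   'france_bano_75.csv' -> 'bano' (BANO_REGEXP wins before the generic csv rule)
#   'idf_cosmogony.json' -> 'cosmogony'
#   'ile-de-france.pbf'  -> 'osm'
#   'addresses.csv'      -> 'oa'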
@celery.task()
def import_autocomplete(files, autocomplete_instance, asynchronous=True, backup_file=True):
"""
    Import the autocomplete instance's data files
"""
job = models.Job()
job.state = 'running'
actions = []
task = {
'bano': {2: bano2mimir, 7: bano2mimir},
'oa': {2: openaddresses2mimir, 7: openaddresses2mimir},
'osm': {2: osm2mimir, 7: osm2mimir},
'cosmogony': {2: cosmogony2mimir, 7: cosmogony2mimir},
}
autocomplete_dir = current_app.config['TYR_AUTOCOMPLETE_DIR']
    # it's important for the admins to be loaded first, then addresses, then streets, then POIs
import_order = ['cosmogony', 'bano', 'oa', 'osm']
files_and_types = [(f, type_of_autocomplete_data(f)) for f in files]
files_and_types = sorted(files_and_types, key=lambda f_t: import_order.index(f_t[1]))
for f, ftype in files_and_types:
if ftype not in task:
# unknown type, we skip it
current_app.logger.debug("unknown file type: {} for file {}".format(ftype, f))
continue
filename = f
if backup_file:
filename = move_to_backupdirectory(
f, autocomplete_instance.backup_dir(autocomplete_dir), manage_sp_char=True
)
for version, executable in task[ftype].items():
if not is_activate_autocomplete_version(version):
current_app.logger.debug("Autocomplete version {} is disableed".format(version))
continue
dataset = create_and_get_dataset(
ds_type=ftype, family_type='autocomplete_{}'.format(ftype), filename=filename
)
actions.append(
executable.si(
autocomplete_instance,
filename=filename,
job_id=job.id,
dataset_uid=dataset.uid,
autocomplete_version=version,
)
)
models.db.session.add(dataset)
job.data_sets.append(dataset)
job.autocomplete_params_id = autocomplete_instance.id
if not actions:
return
models.db.session.add(job)
models.db.session.commit()
for action in actions:
action.kwargs['job_id'] = job.id
actions.append(finish_job.si(job.id))
if asynchronous:
return chain(*actions).delay(), job
else:
        # all jobs are run in sequence; import_autocomplete will only return when they are all finished
return chain(*actions).apply(), job
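# Illustrative sketch (hypothetical paths and instance object): a synchronous import of
# two autocomplete files would look like
#     files = ['/srv/tyr/fr-idf/source/paris.osm.pbf', '/srv/tyr/fr-idf/source/bano-75.csv']
#     tasks, job = import_autocomplete(files, ac_instance, asynchronous=False)
# with asynchronous=False the chain is applied in-process and the call only returns once
# every mimir import has finished.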
@celery.task()
def import_in_mimir(_file, instance, asynchronous=True):
"""
    Import pt (stops) or poi data into the autocomplete engine (Mimir)
"""
datatype, _ = utils.type_of_data(_file)
family_type = utils.family_of_data(datatype)
current_app.logger.debug("Import {} data to mimir".format(family_type))
actions = []
for version in (2, 7):
if not is_activate_autocomplete_version(version):
logging.getLogger(__name__).info("Disable import mimir version {}".format(version))
continue
if family_type == 'pt':
if instance.import_ntfs_in_mimir:
actions.append(ntfs2mimir.si(instance.name, _file, version))
# Deprecated: https://github.com/hove-io/mimirsbrunn/blob/4430eed1d81247fffa7cf32ba675a9c5ad8b1cbe/documentation/components.md#stops2mimir
if instance.import_stops_in_mimir and not instance.import_ntfs_in_mimir:
actions.append(stops2mimir.si(instance.name, _file, version))
elif family_type == 'poi':
actions.append(poi2mimir.si(instance.name, _file, version))
else:
current_app.logger.warning("Unsupported family_type {}".format(family_type))
if asynchronous:
return chain(*actions).delay()
else:
        # all jobs are run in sequence; import_in_mimir will only return when they are all finished
return chain(*actions).apply()
@celery.task()
def update_autocomplete():
current_app.logger.debug("Update autocomplete data")
autocomplete_dir = current_app.config['TYR_AUTOCOMPLETE_DIR']
for autocomplete_instance in models.AutocompleteParameter.query.all():
files = glob.glob(autocomplete_instance.source_dir(autocomplete_dir) + "/*")
if files:
import_autocomplete(files, autocomplete_instance, backup_file=True)
@celery.task()
def purge_datasets():
instances = models.Instance.query_existing().all()
current_app.logger.info("Instances to purge: {}".format(instances))
for instance in instances:
try:
purge_instance(instance.id, current_app.config['DATASET_MAX_BACKUPS_TO_KEEP'])
except Exception as e:
            # Do not stop the task for the other instances if one of them fails
current_app.logger.error("Dataset purge failed for instance {i}: {e}".format(i=instance, e=e))
@celery.task()
def purge_instance(instance_id, nb_to_keep):
instance = models.Instance.query.get(instance_id)
logger = get_instance_logger(instance)
logger.info('purge of backup directories for %s', instance.name)
try:
instance_config = load_instance_config(instance.name)
except Exception as e:
logger.error("Impossible to load instance configuration for {i}: {e}".format(i=instance.name, e=e))
return
backups = set(glob.glob('{}/*'.format(instance_config.backup_directory)))
logger.info('backups are: %s', backups)
    # we use realpath to avoid problems with double slashes and the like
loaded = set(
os.path.realpath(os.path.dirname(dataset.name)) for dataset in instance.last_datasets(nb_to_keep)
)
logger.info('loaded data are: %s', loaded)
running = set(os.path.realpath(os.path.dirname(dataset.name)) for dataset in instance.running_datasets())
logger.info('running bina are: %s', running)
to_remove = [os.path.join(instance_config.backup_directory, f) for f in backups - loaded - running]
missing = [l for l in loaded if l not in backups]
if missing:
logger.error(
"MISSING backup files! impossible to find %s in the backup dir, "
"we skip the purge, repair ASAP to fix the purge",
missing,
)
return
logger.info('we remove: %s', to_remove)
for path in to_remove:
shutil.rmtree(path)
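# Illustration of the purge logic above (hypothetical directories): with
# backups = {a, b, c}, loaded = {b} and running = {c}, only directory a is removed;
# if a loaded directory were missing from the backup dir, the purge would be skipped.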
def purge_cities():
"""
Delete old 'cities' jobs and the associated dataset in db and on disk
"""
nb_datasets_to_keep = current_app.config.get('DATASET_MAX_BACKUPS_TO_KEEP', 1)
cities_job = (
models.Job.query.join(models.DataSet)
.filter(models.DataSet.type == 'cities')
.order_by(models.Job.created_at.desc())
.all()
)
cities_job_to_keep = cities_job[:nb_datasets_to_keep]
datasets_to_keep = [job.data_sets.first().name for job in cities_job_to_keep]
for job in cities_job[nb_datasets_to_keep:]:
logging.info(" - Remove JOB {}".format(job.id))
dataset = job.data_sets.first()
logging.info(" Remove associated DATASET {}".format(dataset.id))
models.db.session.delete(dataset)
if os.path.exists(dataset.name) and dataset.name not in datasets_to_keep:
logging.info(" - delete file {}".format(dataset.name))
shutil.rmtree('{}'.format(dataset.name))
models.db.session.delete(job)
models.db.session.commit()
@celery.task()
def purge_jobs(days_to_keep=None):
"""
Delete old jobs in database and backup folders associated
:param days_to_keep: Period of time to keep jobs (in days). The default value is 'JOB_MAX_PERIOD_TO_KEEP'
"""
if days_to_keep is None:
days_to_keep = current_app.config.get('JOB_MAX_PERIOD_TO_KEEP', 60)
time_limit = datetime.utcnow() - timedelta(days=int(days_to_keep))
# Purge all instances (even discarded = true)
instances = models.Instance.query_all().all()
logger = logging.getLogger(__name__)
logger.info('Purge old jobs and datasets backup created before {}'.format(time_limit))
for instance in instances:
datasets_to_delete = instance.delete_old_jobs_and_list_datasets(time_limit)
if datasets_to_delete:
backups_to_delete = set(os.path.realpath(os.path.dirname(dataset)) for dataset in datasets_to_delete)
logger.info('backups_to_delete are: {}'.format(backups_to_delete))
for path in backups_to_delete:
if os.path.exists(path):
shutil.rmtree('{}'.format(path))
else:
logger.warning('Folder {} can\'t be found'.format(path))
# Purge 'cities' jobs (which aren't associated to an instance)
purge_cities()
@celery.task()
def scan_instances():
for instance_file in glob.glob(current_app.config['INSTANCES_DIR'] + '/*.ini'):
instance_name = os.path.basename(instance_file).replace('.ini', '')
instance = models.Instance.query_all().filter_by(name=instance_name).first()
if not instance:
            current_app.logger.info('new instance detected: %s', instance_name)
instance = models.Instance(name=instance_name)
instance_config = load_instance_config(instance.name)
instance.is_free = instance_config.is_free
            # by default we consider a free instance as an open data one
instance.is_open_data = instance_config.is_free
models.db.session.add(instance)
models.db.session.commit()
@celery.task()
def reload_kraken(instance_id):
instance = models.Instance.query.get(instance_id)
job = models.Job()
job.instance = instance
job.state = 'running'
instance_config = load_instance_config(instance.name)
models.db.session.add(job)
models.db.session.commit()
chain(reload_data.si(instance_config, job.id), finish_job.si(job.id)).delay()
logging.info("Task reload kraken for instance {} queued".format(instance.name))
@celery.task()
def build_all_data():
for instance in models.Instance.query_existing().all():
build_data(instance)
@celery.task()
def build_data(instance):
job = models.Job()
job.instance = instance
job.state = 'running'
instance_config = load_instance_config(instance.name)
models.db.session.add(job)
models.db.session.commit()
chain(ed2nav.si(instance_config, job.id, None), finish_job.si(job.id)).delay()
current_app.logger.info("Job build data of : %s queued" % instance.name)
@celery.task()
def load_data(instance_id, data_dirs):
instance = models.Instance.query.get(instance_id)
import_data(data_dirs, instance, backup_file=False, asynchronous=False)
@celery.task()
def cities(file_path, job_id, exe):
"""Launch 'cities' or 'cosmogony2cities'"""
job = models.Job.query.get(job_id)
res = -1
try:
res = launch_exec(
"{}".format(exe),
['-i', file_path, '--connection-string', current_app.config['CITIES_DATABASE_URI']],
logging,
)
if res != 0:
job.state = 'failed'
logging.error('{} failed'.format(exe))
else:
job.state = 'done'
except Exception as e:
        logging.exception('{} exception : {}'.format(exe, e))
job.state = 'failed'
models.db.session.commit()
raise
models.db.session.commit()
logging.info('Import of {} finished'.format(exe))
return res
@celery.task()
def bounding_shape(instance_name, shape_path):
"""Set the bounding shape to a custom value"""
instance_conf = load_instance_config(instance_name)
load_bounding_shape(instance_name, instance_conf, shape_path)
@task_postrun.connect
def close_session(*args, **kwargs):
    # Flask-SQLAlchemy will automatically create new sessions for you from
    # a scoped session factory. Given that we are maintaining the same app
    # context, this ensures tasks have a fresh session (e.g. session errors
    # won't propagate across tasks).
models.db.session.remove()
@celery.task()
def heartbeat():
"""
    Send a heartbeat to all krakens.
"""
logging.info('ping krakens!!')
with kombu.Connection(current_app.config['KRAKEN_BROKER_URL']) as connection:
instances = models.Instance.query_existing().all()
task = task_pb2.Task()
task.action = task_pb2.HEARTBEAT
for instance in instances:
try:
config = load_instance_config(instance.name)
exchange = kombu.Exchange(config.exchange, 'topic', durable=True)
producer = connection.Producer(exchange=exchange)
producer.publish(task.SerializeToString(), routing_key='{}.task.heartbeat'.format(instance.name))
except Exception as e:
logging.error("Could not ping krakens for instance {i}: {e}".format(i=instance, e=e))
@celery.task()
def create_autocomplete_depot(name):
autocomplete_dir = current_app.config['TYR_AUTOCOMPLETE_DIR']
autocomplete = models.AutocompleteParameter.query.filter_by(name=name).first_or_404()
main_dir = autocomplete.main_dir(autocomplete_dir)
try:
if not os.path.exists(main_dir):
os.makedirs(main_dir)
source = autocomplete.source_dir(autocomplete_dir)
if not os.path.exists(source):
os.makedirs(source)
backup = autocomplete.backup_dir(autocomplete_dir)
if not os.path.exists(backup):
os.makedirs(backup)
except OSError:
logging.error('create directory {} failed'.format(main_dir))
@celery.task()
def remove_autocomplete_depot(name):
logging.info('removing instance dir for {}'.format(name))
autocomplete_dir = current_app.config['TYR_AUTOCOMPLETE_DIR']
if os.path.exists(autocomplete_dir):
autocomplete = models.AutocompleteParameter.query.filter_by(name=name).first_or_404()
main_dir = autocomplete.main_dir(autocomplete_dir)
if os.path.exists(main_dir):
shutil.rmtree(main_dir)
else:
            logging.warning('no autocomplete directory for {}, removing nothing'.format(main_dir))
else:
logging.warning('no main autocomplete directory, removing nothing')
@celery.task()
def purge_autocomplete():
logger = logging.getLogger(__name__)
autocomplete_instances = models.AutocompleteParameter.query.all()
for ac_instance in autocomplete_instances:
logger.info('purging autocomplete backup directories for %s', ac_instance.name)
max_backups = current_app.config.get('AUOTOCOMPLETE_MAX_BACKUPS_TO_KEEP', 5)
dir_to_keep = set(
os.path.realpath(os.path.dirname(dataset.name)) for dataset in ac_instance.last_datasets(max_backups)
)
autocomplete_dir = current_app.config['TYR_AUTOCOMPLETE_DIR']
backup_dir = os.path.join(autocomplete_dir, ac_instance.name, 'backup')
all_backups = set(os.path.join(backup_dir, backup) for backup in os.listdir(backup_dir))
to_remove = all_backups - dir_to_keep
for directory in to_remove:
if os.path.exists(directory):
try:
logger.info('removing backup directory: %s', directory)
shutil.rmtree(directory)
except Exception as e:
logger.info('cannot purge directory: %s because: %s', directory, str(e))
| agpl-3.0 |
cainiaocome/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 120 | 3429 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
    # seeds placed away from the data; fit should fail with a sensible error
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
    # Non-regression: before fit, there should be no fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
| bsd-3-clause |
ray-project/ray | rllib/examples/models/custom_loss_model.py | 1 | 6120 | import numpy as np
from ray.rllib.models.modelv2 import ModelV2, restore_original_dimensions
from ray.rllib.models.tf.tf_action_dist import Categorical
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.tf.fcnet import FullyConnectedNetwork
from ray.rllib.models.torch.torch_action_dist import TorchCategorical
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as TorchFC
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.offline import JsonReader
tf1, tf, tfv = try_import_tf()
torch, nn = try_import_torch()
class CustomLossModel(TFModelV2):
"""Custom model that adds an imitation loss on top of the policy loss."""
def __init__(self, obs_space, action_space, num_outputs, model_config, name):
super().__init__(obs_space, action_space, num_outputs, model_config, name)
self.fcnet = FullyConnectedNetwork(
self.obs_space, self.action_space, num_outputs, model_config, name="fcnet"
)
@override(ModelV2)
def forward(self, input_dict, state, seq_lens):
# Delegate to our FCNet.
return self.fcnet(input_dict, state, seq_lens)
@override(ModelV2)
def value_function(self):
# Delegate to our FCNet.
return self.fcnet.value_function()
@override(ModelV2)
def custom_loss(self, policy_loss, loss_inputs):
# Create a new input reader per worker.
reader = JsonReader(self.model_config["custom_model_config"]["input_files"])
input_ops = reader.tf_input_ops()
# Define a secondary loss by building a graph copy with weight sharing.
obs = restore_original_dimensions(
tf.cast(input_ops["obs"], tf.float32), self.obs_space
)
logits, _ = self.forward({"obs": obs}, [], None)
# You can also add self-supervised losses easily by referencing tensors
# created during _build_layers_v2(). For example, an autoencoder-style
# loss can be added as follows:
# ae_loss = squared_diff(
# loss_inputs["obs"], Decoder(self.fcnet.last_layer))
print("FYI: You can also use these tensors: {}, ".format(loss_inputs))
# Compute the IL loss.
action_dist = Categorical(logits, self.model_config)
self.policy_loss = policy_loss
self.imitation_loss = tf.reduce_mean(-action_dist.logp(input_ops["actions"]))
return policy_loss + 10 * self.imitation_loss
def metrics(self):
return {
"policy_loss": self.policy_loss,
"imitation_loss": self.imitation_loss,
}
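# Illustrative sketch (not part of this example file): the TF model above would
# typically be registered with RLlib's ModelCatalog and selected via the trainer
# config; the "input_files" path below is a placeholder.
#     from ray.rllib.models import ModelCatalog
#     ModelCatalog.register_custom_model("custom_loss_model", CustomLossModel)
#     config = {
#         "model": {
#             "custom_model": "custom_loss_model",
#             "custom_model_config": {"input_files": "/tmp/demo-out"},
#         },
#     }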
class TorchCustomLossModel(TorchModelV2, nn.Module):
"""PyTorch version of the CustomLossModel above."""
def __init__(
self, obs_space, action_space, num_outputs, model_config, name, input_files
):
super().__init__(obs_space, action_space, num_outputs, model_config, name)
nn.Module.__init__(self)
self.input_files = input_files
# Create a new input reader per worker.
self.reader = JsonReader(self.input_files)
self.fcnet = TorchFC(
self.obs_space, self.action_space, num_outputs, model_config, name="fcnet"
)
@override(ModelV2)
def forward(self, input_dict, state, seq_lens):
# Delegate to our FCNet.
return self.fcnet(input_dict, state, seq_lens)
@override(ModelV2)
def value_function(self):
# Delegate to our FCNet.
return self.fcnet.value_function()
@override(ModelV2)
def custom_loss(self, policy_loss, loss_inputs):
"""Calculates a custom loss on top of the given policy_loss(es).
Args:
policy_loss (List[TensorType]): The list of already calculated
policy losses (as many as there are optimizers).
loss_inputs: Struct of np.ndarrays holding the
entire train batch.
Returns:
List[TensorType]: The altered list of policy losses. In case the
custom loss should have its own optimizer, make sure the
returned list is one larger than the incoming policy_loss list.
In case you simply want to mix in the custom loss into the
already calculated policy losses, return a list of altered
policy losses (as done in this example below).
"""
# Get the next batch from our input files.
batch = self.reader.next()
# Define a secondary loss by building a graph copy with weight sharing.
obs = restore_original_dimensions(
torch.from_numpy(batch["obs"]).float().to(policy_loss[0].device),
self.obs_space,
tensorlib="torch",
)
logits, _ = self.forward({"obs": obs}, [], None)
# You can also add self-supervised losses easily by referencing tensors
# created during _build_layers_v2(). For example, an autoencoder-style
# loss can be added as follows:
# ae_loss = squared_diff(
# loss_inputs["obs"], Decoder(self.fcnet.last_layer))
print("FYI: You can also use these tensors: {}, ".format(loss_inputs))
# Compute the IL loss.
action_dist = TorchCategorical(logits, self.model_config)
imitation_loss = torch.mean(
-action_dist.logp(
torch.from_numpy(batch["actions"]).to(policy_loss[0].device)
)
)
self.imitation_loss_metric = imitation_loss.item()
self.policy_loss_metric = np.mean([loss.item() for loss in policy_loss])
# Add the imitation loss to each already calculated policy loss term.
# Alternatively (if custom loss has its own optimizer):
# return policy_loss + [10 * self.imitation_loss]
return [loss_ + 10 * imitation_loss for loss_ in policy_loss]
def metrics(self):
return {
"policy_loss": self.policy_loss_metric,
"imitation_loss": self.imitation_loss_metric,
}
| apache-2.0 |
PatrickOReilly/scikit-learn | sklearn/cluster/tests/test_spectral.py | 71 | 7950 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
    # Test discretize using a noisy assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.randint(0, n_class + 1, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
snap-stanford/ogb | examples/lsc/mag240m/sgc.py | 1 | 5115 | import time
import argparse
import torch
import numpy as np
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.nn import ModuleList, Linear, BatchNorm1d, Identity
from ogb.lsc import MAG240MDataset, MAG240MEvaluator
from root import ROOT
class MLP(torch.nn.Module):
def __init__(self, in_channels: int, hidden_channels: int,
out_channels: int, num_layers: int, dropout: float = 0.0,
batch_norm: bool = True, relu_last: bool = False):
super(MLP, self).__init__()
self.lins = ModuleList()
self.lins.append(Linear(in_channels, hidden_channels))
for _ in range(num_layers - 2):
self.lins.append(Linear(hidden_channels, hidden_channels))
self.lins.append(Linear(hidden_channels, out_channels))
self.batch_norms = ModuleList()
for _ in range(num_layers - 1):
norm = BatchNorm1d(hidden_channels) if batch_norm else Identity()
self.batch_norms.append(norm)
self.dropout = dropout
self.relu_last = relu_last
def reset_parameters(self):
for lin in self.lins:
lin.reset_parameters()
for batch_norm in self.batch_norms:
batch_norm.reset_parameters()
def forward(self, x):
for lin, batch_norm in zip(self.lins[:-1], self.batch_norms):
x = lin(x)
if self.relu_last:
x = batch_norm(x).relu_()
else:
x = batch_norm(x.relu_())
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.lins[-1](x)
return x
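# Illustrative shapes (assumed for the example, not read from the script below): with
# 768-dimensional input features and 153 target classes,
#     MLP(in_channels=768, hidden_channels=512, out_channels=153, num_layers=3)
# builds Linear(768->512), Linear(512->512) and Linear(512->153), and expects a float
# tensor of shape [batch_size, 768].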
def train(model, x_train, y_train, batch_size, optimizer):
model.train()
total_loss = 0
for idx in DataLoader(range(y_train.size(0)), batch_size, shuffle=True):
optimizer.zero_grad()
loss = F.cross_entropy(model(x_train[idx]), y_train[idx])
loss.backward()
optimizer.step()
total_loss += float(loss) * idx.numel()
return total_loss / y_train.size(0)
@torch.no_grad()
def test(model, x_eval, y_eval, evaluator):
model.eval()
y_pred = model(x_eval).argmax(dim=-1)
return evaluator.eval({'y_true': y_eval, 'y_pred': y_pred})['acc']
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--layer', type=int, default=3)
parser.add_argument('--hidden_channels', type=int, default=512)
    parser.add_argument('--num_layers', type=int, default=3)
parser.add_argument('--no_batch_norm', action='store_true')
parser.add_argument('--relu_last', action='store_true')
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--batch_size', type=int, default=380000)
parser.add_argument('--epochs', type=int, default=1000)
args = parser.parse_args()
print(args)
torch.manual_seed(12345)
device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
dataset = MAG240MDataset(ROOT)
evaluator = MAG240MEvaluator()
train_idx = dataset.get_idx_split('train')
valid_idx = dataset.get_idx_split('valid')
test_idx = dataset.get_idx_split('test-dev')
t = time.perf_counter()
print('Reading node features...', end=' ', flush=True)
x_train = np.load(f'{dataset.dir}/x_train_{args.layer}.npy')
x_train = torch.from_numpy(x_train).to(device)
x_valid = np.load(f'{dataset.dir}/x_valid_{args.layer}.npy')
x_valid = torch.from_numpy(x_valid).to(device)
x_test = np.load(f'{dataset.dir}/x_test_{args.layer}.npy')
x_test = torch.from_numpy(x_test).to(device)
print(f'Done! [{time.perf_counter() - t:.2f}s]')
y_train = torch.from_numpy(dataset.paper_label[train_idx])
y_train = y_train.to(device, torch.long)
y_valid = torch.from_numpy(dataset.paper_label[valid_idx])
y_valid = y_valid.to(device, torch.long)
model = MLP(dataset.num_paper_features, args.hidden_channels,
dataset.num_classes, args.num_layers, args.dropout,
not args.no_batch_norm, args.relu_last).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
num_params = sum([p.numel() for p in model.parameters()])
print(f'#Params: {num_params}')
best_valid_acc = 0
for epoch in range(1, args.epochs + 1):
loss = train(model, x_train, y_train, args.batch_size, optimizer)
train_acc = test(model, x_train, y_train, evaluator)
valid_acc = test(model, x_valid, y_valid, evaluator)
if valid_acc > best_valid_acc:
best_valid_acc = valid_acc
with torch.no_grad():
model.eval()
res = {'y_pred': model(x_test).argmax(dim=-1)}
evaluator.save_test_submission(res, 'results/sgc', mode = 'test-dev')
if epoch % 1 == 0:
print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, '
f'Train: {train_acc:.4f}, Valid: {valid_acc:.4f}, '
f'Best: {best_valid_acc:.4f}')
| mit |
glennq/scikit-learn | sklearn/linear_model/__init__.py | 82 | 3139 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .huber import HuberRegressor
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'HuberRegressor',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
| bsd-3-clause |
D-K-E/cltk | src/cltk/nlp.py | 1 | 6266 | """Primary module for CLTK pipeline."""
from threading import Lock
from typing import Type
import cltk
from cltk.core.data_types import Doc, Language, Pipeline, Process
from cltk.core.exceptions import UnimplementedAlgorithmError
from cltk.languages.pipelines import (
AkkadianPipeline,
ArabicPipeline,
AramaicPipeline,
ChinesePipeline,
CopticPipeline,
GothicPipeline,
GreekPipeline,
HindiPipeline,
LatinPipeline,
MiddleEnglishPipeline,
MiddleFrenchPipeline,
MiddleHighGermanPipeline,
OCSPipeline,
OldEnglishPipeline,
OldFrenchPipeline,
OldNorsePipeline,
PaliPipeline,
PanjabiPipeline,
SanskritPipeline,
)
from cltk.languages.utils import get_lang
iso_to_pipeline = {
"akk": AkkadianPipeline,
"ang": OldEnglishPipeline,
"arb": ArabicPipeline,
"arc": AramaicPipeline,
"chu": OCSPipeline,
"cop": CopticPipeline,
"enm": MiddleEnglishPipeline,
"frm": MiddleFrenchPipeline,
"fro": OldFrenchPipeline,
"gmh": MiddleHighGermanPipeline,
"got": GothicPipeline,
"grc": GreekPipeline,
"hin": HindiPipeline,
"lat": LatinPipeline,
"lzh": ChinesePipeline,
"non": OldNorsePipeline,
"pan": PanjabiPipeline,
"pli": PaliPipeline,
"san": SanskritPipeline,
}
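# Illustrative lookups: the mapping is keyed by ISO 639-3 codes, so
# iso_to_pipeline["lat"] is the LatinPipeline class and iso_to_pipeline["grc"]()
# builds a default Ancient Greek pipeline instance.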
class NLP:
"""NLP class for default processing."""
process_objects = dict()
process_lock = Lock()
def __init__(
self,
language: str,
custom_pipeline: Pipeline = None,
suppress_banner: bool = False,
) -> None:
"""Constructor for CLTK class.
Args:
language: ISO code
custom_pipeline: Optional ``Pipeline`` for processing text.
>>> from cltk import NLP
>>> cltk_nlp = NLP(language="lat", suppress_banner=True)
>>> isinstance(cltk_nlp, NLP)
True
>>> from cltk.core.data_types import Pipeline
>>> from cltk.tokenizers import LatinTokenizationProcess
>>> from cltk.languages.utils import get_lang
>>> a_pipeline = Pipeline(description="A custom Latin pipeline", processes=[LatinTokenizationProcess], language=get_lang("lat"))
>>> nlp = NLP(language="lat", custom_pipeline=a_pipeline, suppress_banner=True)
>>> nlp.pipeline is a_pipeline
True
"""
self.language = get_lang(language) # type: Language
self.pipeline = custom_pipeline if custom_pipeline else self._get_pipeline()
if not suppress_banner:
self._print_pipelines_for_current_lang()
def _print_pipelines_for_current_lang(self):
"""Print to screen the ``Process``es invoked upon invocation
of ``NLP()``.
"""
processes_name = [
process.__name__ for process in self.pipeline.processes
] # type: List[str]
processes_name_str = "`, `".join(processes_name) # type: str
ltr_mark = "\u200E"
alep = "𐤀"
print(f"{ltr_mark + alep} CLTK version '{cltk.__version__.version}'.")
print(
f"Pipeline for language '{self.language.name}' (ISO: '{self.language.iso_639_3_code}'): `{processes_name_str}`."
)
def _get_process_object(self, process_object: Type[Process]) -> Process:
"""
Returns an instance of a process from a memoized hash.
        If the process has not been instantiated yet, it is created and stashed in the cache.
"""
with NLP.process_lock:
a_process = NLP.process_objects.get(process_object, None)
if a_process:
return a_process
else:
a_process = process_object(self.language.iso_639_3_code)
NLP.process_objects[process_object] = a_process
return a_process
def analyze(self, text: str) -> Doc:
"""The primary method for the NLP object, to which raw text strings are passed.
Args:
text: Input text string.
Returns:
CLTK ``Doc`` containing all processed information.
>>> from cltk.languages.example_texts import get_example_text
>>> from cltk.core.data_types import Doc
>>> cltk_nlp = NLP(language="lat", suppress_banner=True)
>>> cltk_doc = cltk_nlp.analyze(text=get_example_text("lat"))
>>> isinstance(cltk_doc, Doc)
True
>>> cltk_doc.words[0] # doctest: +ELLIPSIS
Word(index_char_start=None, index_char_stop=None, index_token=0, index_sentence=0, string='Gallia', pos=noun, lemma='mallis', stem=None, scansion=None, xpos='A1|grn1|casA|gen2', upos='NOUN', dependency_relation='nsubj', governor=3, features={Case: [nominative], Degree: [positive], Gender: [feminine], Number: [singular]}, category={F: [neg], N: [pos], V: [neg]}, stop=False, named_entity='LOCATION', syllables=None, phonetic_transcription=None, definition='')
"""
doc = Doc(language=self.language.iso_639_3_code, raw=text)
for process in self.pipeline.processes:
a_process = self._get_process_object(process)
doc = a_process.run(doc)
return doc
def _get_pipeline(self) -> Pipeline:
"""Select appropriate pipeline for given language. If custom
processing is requested, ensure that user-selected choices
are valid, both in themselves and in unison.
>>> from cltk.core.data_types import Pipeline
>>> cltk_nlp = NLP(language="lat", suppress_banner=True)
>>> lat_pipeline = cltk_nlp._get_pipeline()
>>> isinstance(cltk_nlp.pipeline, Pipeline)
True
>>> isinstance(lat_pipeline, Pipeline)
True
>>> cltk_nlp = NLP(language="axm", suppress_banner=True)
Traceback (most recent call last):
...
cltk.core.exceptions.UnimplementedAlgorithmError: Valid ISO language code, however this algorithm is not available for ``axm``.
"""
try:
return iso_to_pipeline[self.language.iso_639_3_code]()
except KeyError:
raise UnimplementedAlgorithmError(
f"Valid ISO language code, however this algorithm is not available for ``{self.language.iso_639_3_code}``."
)
def __call__(self, text: str) -> Doc:
return self.analyze(text)
| mit |
giorgiop/scikit-learn | setup.py | 9 | 12000 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
import traceback
import subprocess
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
SCIPY_MIN_VERSION = '0.9'
NUMPY_MIN_VERSION = '1.6.1'
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
extras_require={
'alldeps': (
'numpy >= {0}'.format(NUMPY_MIN_VERSION),
'scipy >= {0}'.format(SCIPY_MIN_VERSION),
),
},
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
# Remove c files if we are not within a sdist package
cwd = os.path.abspath(os.path.dirname(__file__))
remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO'))
if remove_c_files:
cython_hash_file = os.path.join(cwd, 'cythonize.dat')
if os.path.exists(cython_hash_file):
os.unlink(cython_hash_file)
print('Will remove generated .c files')
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if any(filename.endswith(suffix) for suffix in
(".so", ".pyd", ".dll", ".pyc")):
os.unlink(os.path.join(dirpath, filename))
continue
extension = os.path.splitext(filename)[1]
if remove_c_files and extension in ['.c', '.cpp']:
pyx_file = str.replace(filename, extension, '.pyx')
if os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
def get_scipy_status():
"""
Returns a dictionary containing a boolean specifying whether SciPy
is up-to-date, along with the version string (empty string if
not installed).
"""
scipy_status = {}
try:
import scipy
scipy_version = scipy.__version__
scipy_status['up_to_date'] = parse_version(
scipy_version) >= parse_version(SCIPY_MIN_VERSION)
scipy_status['version'] = scipy_version
except ImportError:
traceback.print_exc()
scipy_status['up_to_date'] = False
scipy_status['version'] = ""
return scipy_status
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(NUMPY_MIN_VERSION)
numpy_status['version'] = numpy_version
except ImportError:
traceback.print_exc()
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
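# Illustrative return values for the two status helpers above: an up-to-date install
# yields e.g. {'up_to_date': True, 'version': '1.11.0'} (the version string is whatever
# the installed package reports), while a missing dependency yields
# {'up_to_date': False, 'version': ''}.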
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
p = subprocess.call([sys.executable, os.path.join(cwd,
'build_tools',
'cythonize.py'),
'sklearn'],
cwd=cwd)
if p != 0:
raise RuntimeError("Running cythonize failed!")
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if len(sys.argv) == 1 or (
len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info',
'--version',
'clean'))):
# For these actions, NumPy is not required, nor Cythonization
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
numpy_status = get_numpy_status()
numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
NUMPY_MIN_VERSION)
scipy_status = get_scipy_status()
scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
SCIPY_MIN_VERSION)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError("Your installation of Numerical Python "
"(NumPy) {0} is out-of-date.\n{1}{2}"
.format(numpy_status['version'],
numpy_req_str, instructions))
else:
raise ImportError("Numerical Python (NumPy) is not "
"installed.\n{0}{1}"
.format(numpy_req_str, instructions))
if scipy_status['up_to_date'] is False:
if scipy_status['version']:
raise ImportError("Your installation of Scientific Python "
"(SciPy) {0} is out-of-date.\n{1}{2}"
.format(scipy_status['version'],
scipy_req_str, instructions))
else:
raise ImportError("Scientific Python (SciPy) is not "
"installed.\n{0}{1}"
.format(scipy_req_str, instructions))
from numpy.distutils.core import setup
metadata['configuration'] = configuration
if len(sys.argv) >= 2 and sys.argv[1] not in 'config':
# Cythonize if needed
print('Generating cython files')
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
# Generate Cython sources, unless building from source release
generate_cython()
# Clean left-over .so file
for dirpath, dirnames, filenames in os.walk(
os.path.join(cwd, 'sklearn')):
for filename in filenames:
extension = os.path.splitext(filename)[1]
if extension in (".so", ".pyd", ".dll"):
pyx_file = str.replace(filename, extension, '.pyx')
print(pyx_file)
if not os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
discourse-lab/DiscourseSegmenter | dsegmenter/bparseg/bparsegmenter.py | 2 | 16936 | #!/usr/bin/env python2.7
# -*- mode: python; coding: utf-8; -*-
##################################################################
# Documentation
"""Module providing discourse segmenter for constituency trees.
Attributes:
SUBSTITUTEF (method): custom weighting function used for token alignment
_ispunct (method): check if word consists only of punctuation characters
_prune_punc (method): remove tokens representing punctuation from set
_translate_toks (method): replace tokens and return updated set
tree2tok (method): create dictionary mapping constituency trees to numbered tokens
read_trees (method): read file and return a list of constituent dictionaries
read_segments (method): read file and return a list of segment dictionaries
trees2segs (method): align trees with corresponding segments
featgen (method): default feature generation function
classify (method): default classification method
Classes:
BparSegmenter: discourse segmenter for constituency trees
.. moduleauthor:: Wladimir Sidorenko (Uladzimir Sidarenka)
"""
##################################################################
# Libraries
from .align import nw_align
from .constants import ENCODING
from .constituency_tree import Tree, CTree
from ..treeseg import TreeSegmenter, DiscourseSegment, CONSTITUENCY, DEFAULT_SEGMENT
# from sklearn.cross_validation import KFold
from sklearn.externals import joblib
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import VarianceThreshold, SelectKBest
from sklearn.metrics import precision_recall_fscore_support, classification_report, confusion_matrix
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC, LinearSVC
import locale
import os
import re
import sys
import string
##################################################################
# Constants
NONE = str(None)
N_FOLDS = 10
SUBSTITUTEF = lambda c1, c2: 2 if c1[-1] == c2[-1] else -3
ESCAPE_QUOTE_RE = re.compile(r"\\+([\"'])")
ESCAPE_SLASH_RE = re.compile(r"\\/")
##################################################################
# Methods
locale.setlocale(locale.LC_ALL, "")
def _ispunct(a_word):
"""Check if word consists only of punctuation characters.
Args:
a_word (str): word to check
Returns:
(bool) True if word consists only of punctuation characters, False otherwise
"""
return all(c in string.punctuation for c in a_word)
def _prune_punc(a_toks):
"""Remove tokens representing punctuation from set.
@param a_toks - tokens to prune
@return token set with punctuation tokens removed
"""
return frozenset([tok for tok in a_toks if not _ispunct(tok[-1])])
def _translate_toks(a_toks, a_translation):
"""Translate tokens and return translated set.
@param a_toks - tokens to be translated
@param a_translation - translation dictionary for tokens
@return translated set of tokens
"""
if a_translation is None:
return a_toks
ret = set()
for tok in a_toks:
for t_tok in a_translation[tok]:
ret.add(t_tok)
return frozenset(ret)
def tree2tok(a_tree, a_start = 0):
"""Create dictionary mapping constituency trees to numbered tokens.
Args:
a_tree (constituency_tree.Tree): tree to analyze
a_start (int): starting position of the first token
Returns:
(dict) mapping from subtrees to their yields
"""
rset = set()
chset = None
tr2tk = {(a_start, a_tree.label()): (a_tree, rset)}
i = a_start
max_ch_pos = -1
for child in a_tree:
if isinstance(child, Tree):
tr2tk.update(tree2tok(child, i))
chset = tr2tk[(i, child.label())][-1]
i += len(chset)
rset.update(chset)
else:
rset.add((i, child))
i += 1
return tr2tk
def read_trees(a_lines, a_one_per_line = False):
"""Read file and return a list of constituent dictionaries.
Args:
a_lines (list[str]): decoded lines of the input file
Returns:
2-tuple: list of dictionaries mapping tokens to trees and a list of trees
"""
ctrees = CTree.parse_lines(a_lines, a_one_per_line = a_one_per_line)
# generate dictionaries mapping trees' yields to trees
t_cnt = 0
t2t = None
trees2toks = dict()
for ctree in ctrees:
t2t = tree2tok(ctree, t_cnt)
trees2toks.update(t2t)
t_cnt += len(t2t[(t_cnt, ctree.label())][-1])
toks2trees = dict()
for ((tree_c, tree_lbl), (tree, toks)) in trees2toks.iteritems():
toks = frozenset(toks)
if toks in toks2trees:
toks2trees[toks].append(tree)
else:
toks2trees[toks] = [tree]
return toks2trees, ctrees
def read_segments(a_lines):
"""Read file and return a list of segment dictionaries.
Args:
a_lines (list): decoded lines of the input file
Returns:
dict: mapping from tokens to segments
"""
segs2toks = {}
s_c = t_c = 0
tokens = []
atoks = []
new_seg = None
active_tokens = set()
active_segments = []
# read segments
for iline in a_lines:
iline = iline.strip()
if not iline:
continue
# do some clean-up
active_tokens.clear()
del atoks[:]
del active_segments[:]
tokens = iline.split()
# establish correspondence between tokens and segments
for tok in tokens:
if tok[0] == '(' and len(tok) > 1:
active_tokens = set(atoks)
del atoks[:]
for a_s in active_segments:
segs2toks[a_s].update(active_tokens)
new_seg = (s_c, tok[1:])
active_segments.append(new_seg)
segs2toks[new_seg] = set()
s_c += 1
continue
elif tok == ')':
assert active_segments, "Unbalanced closing parenthesis at line: " + repr(iline)
active_tokens = set(atoks)
del atoks[:]
for a_s in active_segments:
segs2toks[a_s].update(active_tokens)
active_segments.pop()
continue
else:
atoks.append((t_c, tok))
t_c += 1
assert not active_segments, "Unbalanced opening parenthesis at line: " + repr(iline)
toks2segs = dict()
segments = segs2toks.keys()
segments.sort(key = lambda el: el[0])
for seg in segments:
toks = frozenset(segs2toks[seg])
        # the same tokenset can correspond to multiple segments; in that
        # case we keep the first one that we encounter
if toks in toks2segs:
continue
assert toks not in toks2segs, "Multiple segments correspond to the same tokenset: '" + \
repr(toks) + "': " + repr(seg) + ", " + repr(toks2segs[toks])
toks2segs[toks] = seg
return toks2segs
def trees2segs(a_toks2trees, a_toks2segs):
"""Align trees with corresponding segments.
Args:
a_toks2trees (dict): mapping from tokens to trees
a_toks2segs (dict): mapping from tokens to segments
Returns:
dict: mapping from trees to segments
"""
# prune empty trees and their corresponding segments
tree2seg = {t: None for val in a_toks2trees.values() for t in val}
# add additional keys to `a_toks2trees` by pruning punctuation marks from
# existing trees
pruned_toks = None
tree_tok_keys = a_toks2trees.keys()
for tree_toks in tree_tok_keys:
pruned_toks = _prune_punc(tree_toks)
if pruned_toks not in a_toks2trees:
a_toks2trees[pruned_toks] = a_toks2trees[tree_toks]
# establish a mapping between tree tokens and segment tokens
tree_toks = list(set([t for t_set in a_toks2trees.keys() for t in t_set]))
tree_toks.sort(key = lambda el: el[0])
seg_toks = list(set([t for t_set in a_toks2segs.keys() for t in t_set]))
seg_toks.sort(key = lambda el: el[0])
# align tokens if necessary
seg_t2tree_t = None
if tree_toks != seg_toks:
seg_t2tree_t = dict()
alignment = nw_align(seg_toks, tree_toks, substitute = SUBSTITUTEF, keep_deleted = True)
for i, tt in enumerate(alignment):
seg_t2tree_t[seg_toks[i]] = [tree_toks[j] for j in tt]
    # for each segment, check whether its corresponding token set is matched
    # by any subtree
translated_toks = None
for toks, segs in a_toks2segs.iteritems():
translated_toks = _translate_toks(toks, seg_t2tree_t)
key = None
if translated_toks in a_toks2trees:
key = translated_toks
else:
translated_toks = _prune_punc(translated_toks)
if translated_toks in a_toks2trees:
key = translated_toks
if key:
for tree in a_toks2trees[key]:
# if tree2seg[tree] is not None:
# continue
assert tree2seg[tree] is None, "Multiple segments found for tree" + repr(tree) + ": " +\
repr(segs[-1]) + "; " + repr(tree2seg[tree])
tree2seg[tree] = segs[-1]
return tree2seg
def featgen(a_tree):
"""Generate features for the given BitPar tree.
@param a_tree - BitPar tree for which we should generate features
@return list of string features
"""
assert a_tree.leaves(), "Tree does not contain leaves."
# add unigram features
ret = {u"tok_{:s}".format(token.lower()): 1 for token in a_tree.leaves()}
# add very first and very last tokens of the tree
ret[u"tokFirst_{:s}".format(a_tree.leaves()[0].lower())] = 1
ret[u"tokLast_{:s}".format(a_tree.leaves()[-1].lower())] = 1
sublabels = [st.label() for st in a_tree.subtrees()]
if sublabels:
ret[u"lblFirst_{:s}".format(sublabels[0].lower())] = 1
ret[u"lblLast_{:s}".format(sublabels[-1].lower())] = 1
# add tree label
ret[u"lbl_{:s}".format(a_tree.label())] = 1
# add label of the parent tree
ret[u"prntLbl_{:s}".format(a_tree.prnt_label())] = 1
# add first and last word of the parent tree
if a_tree.parent():
prnt_tree = a_tree.parent()
t_idx = a_tree.parent_index()
ret[u"treeIdx"] = t_idx
if t_idx > 0:
prev_tree = prnt_tree[t_idx - 1]
ret[u"prevLbl_{:s}".format(prev_tree.label())] = 1
ret[u"prevTokFrst_{:s}".format(prev_tree.leaves()[0].lower())] = 1
ret[u"prevTokLst_{:s}".format(prev_tree.leaves()[-1].lower())] = 1
if t_idx + 1 < len(prnt_tree):
nxt_tree = prnt_tree[t_idx + 1]
ret[u"nxtLbl_{:s}".format(nxt_tree.label())] = 1
ret[u"pxtTokFrst_{:s}".format(nxt_tree.leaves()[0].lower())] = 1
ret[u"pxtTokLst_{:s}".format(nxt_tree.leaves()[-1].lower())] = 1
# add tree height
ret["height"] = a_tree.height()
# add label of the parent tree
return ret
def classify(a_classifier, a_featgen, a_el, a_default = None):
"""Classify given element.
@param a_classifier - model which should make predictions
@param a_featgen - feature generation function
@param a_el - constituency tree to be classified
@param a_default - default element that should be returned if el does
not yield segment
@return assigned class
"""
prediction = a_classifier.predict(a_featgen(a_el))[0]
return a_default if prediction is None or prediction == NONE else prediction
##################################################################
# Class
class BparSegmenter(object):
"""Class for perfoming discourse segmentation on constituency trees.
"""
#: classifier object: default classification method
DEFAULT_CLASSIFIER = LinearSVC(C = 0.3, multi_class = 'crammer_singer')
#:str: path to default model to use in classification
DEFAULT_MODEL = os.path.join(os.path.dirname(__file__), "data", "bpar.model")
#:pipeline object: default pipeline object used for classification
DEFAULT_PIPELINE = Pipeline([('vectorizer', DictVectorizer()),
('var_filter', VarianceThreshold()),
('LinearSVC', DEFAULT_CLASSIFIER)])
def __init__(self, a_featgen = featgen, a_classify = classify, \
a_model = DEFAULT_MODEL):
"""Class constructor.
Args:
a_featgen (method): function to be used for feature generation
a_classify (method): pointer to 2-arg function which predicts segment
class for BitPar tree based on the model and
features generated for that tree
a_model (str): path to a pre-trained model (previously dumped by
joblib) or valid classification object or None
"""
self.featgen = a_featgen
self.classify = a_classify
self._update_segmenter(a_model)
def segment(self, a_trees):
"""Create discourse segments based on the BitPar trees.
Args:
a_trees (list): list of sentence trees to be parsed
Returns:
iterator: constructed segment trees
"""
seg_idx = 0
segments = []
isegment = None
if self.model is None:
return [DiscourseSegment(a_name = DEFAULT_SEGMENT, a_leaves = t.leaves) \
for t in a_trees]
for t in a_trees:
self._segmenter.segment(t, segments)
# if classifier failed to create one common segment for
# the whole tree, create one for it
if (len(segments) - seg_idx) > 1 or \
(len(segments) and not isinstance(segments[-1][-1], DiscourseSegment)):
isegment = DiscourseSegment(a_name = DEFAULT_SEGMENT, \
a_leaves = segments[seg_idx:])
segments[seg_idx:] = [(isegment.leaves[0][0], isegment)]
seg_idx = len(segments)
return segments
def train(self, a_trees, a_segs, a_path):
"""Train segmenter model.
Args:
a_trees (list): BitPar trees
a_segs (list): discourse segments
a_path (str): path to file in which the trained model should be
stored
Returns:
void:
"""
# drop current model
self._update_segmenter(self.DEFAULT_PIPELINE)
# generate features
feats = [self.featgen(t) for t in a_trees]
a_segs = [str(s) for s in a_segs]
# train classifier
self._train(feats, a_segs, self.model)
# store the model to file
joblib.dump(self.model, a_path)
def test(self, a_trees, a_segments):
"""Estimate performance of segmenter model.
Args:
a_trees (list): BitPar trees
a_segments (list): corresponding gold segments for trees
Returns:
2-tuple: macro and micro-averaged F-scores
"""
if self.model is None:
return (0, 0)
segments = [self.model.predict(self.featgen(itree))[0] for itree in a_trees]
a_segments = [str(s) for s in a_segments]
_, _, macro_f1, _ = precision_recall_fscore_support(a_segments, segments, average='macro', \
warn_for = ())
_, _, micro_f1, _ = precision_recall_fscore_support(a_segments, segments, average='micro', \
warn_for = ())
return (macro_f1, micro_f1)
def _train(self, a_feats, a_segs, a_model):
"""Train segmenter model.
        @param a_feats - list of BitPar features
@param a_segs - list of discourse segments
@param a_model - model object whose parameters should be fit
@return \c void
"""
# train classifier
a_model.fit(a_feats, a_segs)
self._update_segmenter(a_model)
def _update_segmenter(self, a_model):
"""Update model, decision function, and internal segmenter.
@param a_model - model used by classifier
@return \c void
"""
if a_model is None:
self.model = a_model
self.decfunc = lambda el: None
self._segmenter = TreeSegmenter(a_decfunc = self.decfunc, a_type = CONSTITUENCY)
return
elif isinstance(a_model, str):
if not os.path.isfile(a_model) or not os.access(a_model, os.R_OK):
raise RuntimeError("Can't create model from file {:s}".format(a_model))
self.model = joblib.load(a_model)
else:
self.model = a_model
self.decfunc = lambda el: self.classify(self.model, self.featgen, el)
self._segmenter = TreeSegmenter(a_decfunc = self.decfunc, a_type = CONSTITUENCY)
| mit |
R0bk/suburbalytics-model | models/population.py | 1 | 2977 | from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler, normalize, scale
from sklearn.metrics import mean_squared_error
import numpy as np
import keras
import math
import csv
import sys
import os
scaler = MinMaxScaler(feature_range=(0, 1))
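# load_csv: read a population CSV from the data-collection directory; with a
# row index it extracts that row's series X and a one-step-shifted copy y,
# otherwise it takes the first two columns of the file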
def load_csv(file, row=-1):
csvd = '../data-collection/population/' + file
data = np.genfromtxt(csvd, delimiter=',')
if row != -1:
X = data[row, 1:]
y = [X[i - 1] for i, x in enumerate(X) if i != 0]
else:
X = data[:, 0]
y = data[:, 1]
return(X, y)
raw_X, raw_Y = X, y
X = scaler.fit_transform(X)
y = scaler.fit_transform(y)
norm_X = normalize(X)
split = int(0.7 * len(X))
return(X, y, split, data, norm_X, raw_X, raw_Y)
def list_of_lists(array):
return(np.array([[x] for x in array]))
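# model: fit a small LSTM on the scaled series of a single row, then roll the
# network forward n steps and append the predicted future values to the
# corresponding 'future<file>' CSV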
def model(file, n, row, wr):
X, y, split, data, norm_X, raw_X, raw_Y = load_csv(file, row)
X = np.array([x for x in X])
y = np.array([x for x in y])
#train, test = norm_X[0: split, :], norm_X[split: len(norm_X), :]
look_back = 1
trainX, trainY = (list_of_lists(X[:split]), list_of_lists(y[:split]))
testX, testY = (list_of_lists(X[split:]), list_of_lists(y[split:]))
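    # reshape inputs to the [samples, time steps, features] layout expected by the LSTM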
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, trainX.shape[1]))
model = Sequential()
model.add(LSTM(4, input_dim=look_back))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, nb_epoch=150, batch_size=1, verbose=2)
print(trainX)
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform(trainY)
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform(testY)
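    # iteratively forecast n future values: each prediction is appended to the
    # series, which is re-scaled and fed back into the network for the next step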
future = []
for i in range(n):
if not len(future):
value = scaler.inverse_transform(model.predict(testX))[-1]
else:
print('**********************************************')
scaled = scaler.fit_transform(list_of_lists(np.append(raw_X, future)))
arranged = np.reshape(scaled, (scaled.shape[0], 1, scaled.shape[1]))
print(arranged)
value = scaler.inverse_transform(model.predict(arranged))[-1]
future.append(value)
with open('future' + file, 'a') as f:
wr = csv.writer(f, delimiter=',')
wr.writerow(future)
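# predicter: run the forecasting model on every row of every CSV in the data
# directory, producing one 'future<file>' output per input file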
def predicter(n, data):
for file in os.listdir(data):
with open('future' + file, 'w') as f:
wr = csv.writer(f, delimiter=',')
print('processing:\t' + file)
for row in range(len(load_csv(file)[0])):
print('processing row:\t' + str(row))
model(file, n, row, wr)
predicter(10, '../data-collection/population/')
| mit |
XiaoLiuAI/RUPEE | src/python/model/util.py | 1 | 15155 | '''
Created on Feb 29, 2012
@author: xiaoliu
'''
import pdb
import numpy as np
import scipy as sp
import sklearn.metrics
import sklearn.utils
import sklearn.externals.joblib
#import copy
'*************** ignore the negative class *******************'
'''
My implementation of precision_recall_fscore_support that
ignores the negative class when computing the average score.
The score of the negative class is still returned when no average is computed.
'''
def unique_labels(*lists_of_labels):
"""Extract an ordered array of unique labels"""
labels = set().union(*(l.ravel() if hasattr(l, "ravel") else l
for l in lists_of_labels))
return np.asarray(sorted(labels))
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None):
'copy from sklearn'
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_true, y_pred = sklearn.utils.check_arrays(y_true, y_pred)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels, dtype=np.int)
n_labels = labels.size
true_pos = np.zeros(n_labels, dtype=np.double)
false_pos = np.zeros(n_labels, dtype=np.double)
false_neg = np.zeros(n_labels, dtype=np.double)
support = np.zeros(n_labels, dtype=np.long)
for i, label_i in enumerate(labels):
true_pos[i] = np.sum(y_pred[y_true == label_i] == label_i)
false_pos[i] = np.sum(y_pred[y_true != label_i] == label_i)
false_neg[i] = np.sum(y_pred[y_true == label_i] != label_i)
support[i] = np.sum(y_true == label_i)
#tp, fp, fn = np.sum(true_pos[1:]), np.sum(false_pos[1:]), np.sum(support[1:])-np.sum(true_pos[1:])
#print sklearn.metrics.confusion_matrix(y_true, y_pred)
#print 'true positive', tp,'\tfalse positive', fp, '\tfalse negative', fn
#print 'f-score', 2.*tp/(2*tp+fn+fp)
try:
# oddly, we may get an "invalid" rather than a "divide" error here
old_err_settings = np.seterr(divide='ignore', invalid='ignore')
# precision and recall
precision = true_pos / (true_pos + false_pos)
recall = true_pos / (true_pos + false_neg)
# handle division by 0.0 in precision and recall
precision[(true_pos + false_pos) == 0.0] = 0.0
recall[(true_pos + false_neg) == 0.0] = 0.0
# fbeta score
beta2 = beta ** 2
fscore = (1 + beta2) * (precision * recall) / (
beta2 * precision + recall)
# handle division by 0.0 in fscore
fscore[(precision + recall) == 0.0] = 0.0
finally:
np.seterr(**old_err_settings)
if not average:
return precision, recall, fscore, support
elif n_labels == 2 and pos_label is not None:
if pos_label not in labels:
raise ValueError("pos_label=%d is not a valid label: %r" %
(pos_label, labels))
pos_label_idx = list(labels).index(pos_label)
return (precision[pos_label_idx], recall[pos_label_idx],
fscore[pos_label_idx], support[pos_label_idx])
else:
average_options = (None, 'micro', 'macro', 'weighted')
true_pos = true_pos[1:]
false_pos = false_pos[1:]
false_neg = false_neg[1:]
if average == 'micro':
nb_pos = true_pos.sum() + false_pos.sum()
if nb_pos == 0:
avg_precision = 0
avg_recall = 0
avg_fscore = 0
else:
nb_true = true_pos.sum() + false_neg.sum()
avg_precision = true_pos.sum() / nb_pos
avg_recall = true_pos.sum() / nb_true
avg_fscore = (1 + beta2) * (avg_precision * avg_recall) / \
(beta2 * avg_precision + avg_recall)
#print 'avg prc', avg_precision, '\tavg rec', avg_recall
#pdb.set_trace()
elif average == 'macro':
avg_precision = np.mean(precision)
avg_recall = np.mean(recall)
avg_fscore = np.mean(fscore)
elif average == 'weighted':
avg_precision = np.average(precision, weights=support)
avg_recall = np.average(recall, weights=support)
avg_fscore = np.average(fscore, weights=support)
else:
raise ValueError('average has to be one of ' +
str(average_options))
return avg_precision, avg_recall, avg_fscore, None
'compute macro-average f-score ignoring the first class (negative class)'
def fbeta_score(y_true, y_pred, pos_label=1, weights=None, beta=1, average='macro'):
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred, beta=beta, average = average)
if not weights:
return f
else:
'!--- codes below have problems ---!'
if not average:
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred, beta=beta, average = average)
return np.average(f, weights=weights)
else:
raise Exception('fbeta_score can only specify one of the parameters (weights, average)')
def f1_score(y_true, y_pred, pos_label=1, weights=None):
return fbeta_score(y_true, y_pred, pos_label, weights)
'compute recall ignoring the first class (negative class)'
def recall(y_true, y_pred, pos_label=1, weights=None):
_, r, _, _ = sklearn.metrics.precision_recall_fscore_support(y_true, y_pred)
if r.shape[0] == 2:
return r[pos_label]
else:
if weights != None:
return np.average(r[1:], weights=weights)
else:
return np.average(r[1:])
'compute the tp fp fn number for each class'
def tp_fp_fn_support(y_true, y_pred, beta=1.0, labels=None):
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
if labels is None:
labels = sklearn.metrics.metrics.unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels, dtype=np.int)
n_labels = labels.size
true_pos = np.zeros(n_labels, dtype=np.double)
false_pos = np.zeros(n_labels, dtype=np.double)
false_neg = np.zeros(n_labels, dtype=np.double)
support = np.zeros(n_labels, dtype=np.long)
for i, label_i in enumerate(labels):
true_pos[i] = np.sum(y_pred[y_true == label_i] == label_i)
false_pos[i] = np.sum(y_pred[y_true != label_i] == label_i)
false_neg[i] = np.sum(y_pred[y_true == label_i] != label_i)
support[i] = np.sum(y_true == label_i)
if n_labels == 2:
        'it is important to choose the positive label for a binary classifier'
return [(true_pos[1], false_pos[1], false_neg[1])]
'if it is multiclass, we ignore the negative class'
true_pos = sp.delete(true_pos, 0)
false_pos = sp.delete(false_pos, 0)
false_neg = sp.delete(false_neg, 0)
'return a list (tp, fp, fn), one tuple for each class'
return zip(true_pos, false_pos, false_neg)
'''
compute the f-score from the (tp, fp, fn) lists returned by tp_fp_fn_support
since tp_fp_fn_support does not return the negative class in the multiclass
case, this function does not consider the negative class either
the difference from the typical approach is that this method pools the counts
across cross-validation folds before averaging the f-score over classes
'''
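# Illustrative input shape (hypothetical numbers): one inner list per CV fold,
# one (tp, fp, fn) triple per class, e.g.
#   fbeta_tp_fp_fn([[(5, 1, 2), (3, 0, 1)],
#                   [(4, 2, 1), (6, 1, 0)]])
# pools each class's counts across the two folds before macro-averaging.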
def fbeta_tp_fp_fn(tp_fp_fn_lst, beta=1):
classes = zip(*tp_fp_fn_lst)
f_cls = [None] * len(classes)
beta2 = beta ** 2
for i, cls in enumerate(classes):
tp, fp, fn = 0, 0, 0
'compute number for cross validation'
for cv in cls:
tp += cv[0]
fp += cv[1]
fn += cv[2]
f = ((1+beta2) * tp) / ((1+beta2) * tp + fp + beta2*fn)
f_cls[i] = f
'macro average of f-score'
return sum(f_cls) / len(f_cls)
def f_tp_fp_fn(tp_fp_fn_lst):
    return fbeta_tp_fp_fn(tp_fp_fn_lst, beta=1)
'''
compute the f-score from the per-fold results returned by
cross_val_score_threshold when used with tp_fp_fn_support as the score function
'''
def f_tp_fp_fn_threshold(folds):
nb_threshold = len(folds[0])
nb_cls = len(folds[0][0])
macro_average = 0
for cls in range(nb_cls):
# tp_threshold_lst = [0] * nb_threshold
# fp_threshold_lst = [0] * nb_threshold
# fn_threshold_lst = [0] * nb_threshold
# for fold in folds:
# for i, fp_fp_fn_ith_threshold in enumerate(fold):
# 'accumulate the number for each fold'
# tp_threshold_lst[i] += fp_fp_fn_ith_threshold[cls][0]
# fp_threshold_lst[i] += fp_fp_fn_ith_threshold[cls][1]
# fn_threshold_lst[i] += fp_fp_fn_ith_threshold[cls][2]
# f_threshold_lst = [(2.0 * tp_threshold_lst[i]) / (2 * tp_threshold_lst[i] + fp_threshold_lst[i] + fn_threshold_lst[i]) for i in range(nb_threshold)]
threshold_tuple = np.array([sum(folds[:,i, cls, :]) for i in range(nb_threshold)])
f_threshold = 2.0*threshold_tuple[:,0]/(2*threshold_tuple[:,0]+threshold_tuple[:,1]+threshold_tuple[:,2])
# pdb.set_trace()
macro_average += np.max(f_threshold)
macro_average /= nb_cls
return macro_average
#def micro_f_tp_fp_fn_threshold(folds):
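# Inner loop of cross_val_score_threshold: fit on the training split, then
# evaluate the score function over a sweep of decision thresholds in [-1, 1).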
def _cross_val_score_threshold(estimator, X, y, score_func, train, test):
estimator.fit(X[train], y[train])
Y_score = sklearn.multiclass._predict_binary(estimator, X[test])
'return the score with all the possible threshold'
threshold_lst = [x / 100.0 for x in range(-100, 100, 1)]
score_lst = list(threshold_lst)
for i, threshold in enumerate(threshold_lst):
score_lst[i] = score_func(y[test], Y_score > threshold)
return score_lst
def cross_val_score_threshold(estimator, X, y=None, score_func=None, cv=None, n_jobs=1):
X, y = sklearn.utils.check_arrays(X, y, sparse_format='csr')
cv = sklearn.cross_validation.check_cv(cv, X, y, classifier=sklearn.base.is_classifier(estimator))
if score_func is None:
if not hasattr(estimator, 'score'):
raise TypeError(
"If no score_func is specified, the estimator passed "
"should have a 'score' method. The estimator %s "
"does not." % estimator)
scores = sklearn.externals.joblib.parallel.Parallel(n_jobs=n_jobs)(
sklearn.externals.joblib.parallel.delayed(_cross_val_score_threshold)(
sklearn.base.clone(estimator),
X,
y,
score_func,
train,
test)
for train, test in cv)
'for each fold, return a score list corresponds to each threshold'
return np.array(scores)
'original, clean cross validation'
def _cross_val_score(estimator, X, y, score_func, train, test, verbose):
"""Inner loop for cross validation"""
estimator.fit(X[train], y[train])
if score_func is None:
score = estimator.score(X[test], y[test])
else:
score = score_func(y[test], estimator.predict(X[test]))
if verbose > 1:
print("score: %f" % score)
return score
def cross_val_score(estimator, X, y=None, score_func=None, cv=None, n_jobs=1,
verbose=0):
X, y = sklearn.utils.check_arrays(X, y, sparse_format='csr')
cv = sklearn.cross_validation.check_cv(cv, X, y, classifier=sklearn.base.is_classifier(estimator))
if score_func is None:
if not hasattr(estimator, 'score'):
raise TypeError(
"If no score_func is specified, the estimator passed "
"should have a 'score' method. The estimator %s "
"does not." % estimator)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
scores = sklearn.externals.joblib.parallel.Parallel(n_jobs=n_jobs, verbose=verbose)(
sklearn.externals.joblib.parallel.delayed(_cross_val_score)(
sklearn.base.clone(estimator),
X,
y,
score_func,
train,
test,
verbose)
for train, test in cv)
return np.array(scores)
'compute precision, recall and f1 score from cm matrix for each class'
def precision_recall_fscore_fromCM(cm):
# import pdb; pdb.set_trace()
if cm.shape[0] != cm.shape[1]:
raise Exception('confusion matrix has different dimensions')
eps = np.finfo(np.double).eps
precision = cm.diagonal() / (cm.sum(axis=1) + eps)
recall = cm.diagonal() / (cm.sum(axis=0) + eps)
fscore = 2 * (precision * recall) / (precision + recall + eps)
return precision, recall, fscore
'*************** all the classes *******************'
'macro average is the mean of score of each class'
def macro_f1_score(y_true, y_pred):
nb_ins = len(y_true)
if nb_ins != len(y_pred):
        raise Exception('the size of the prediction result is not consistent with the size of the labels')
bl = sklearn.preprocessing.Binarizer()
    # binarize the concatenation of true and predicted labels
    Y = bl.transform(np.concatenate((y_true, y_pred)))
Y_true = Y[0:nb_ins, :]
Y_pred = Y[nb_ins:nb_ins * 2, :]
nb_cls = Y.shape[1]
f_list = [0] * nb_cls
for i in range(nb_cls):
f_list[i] = sklearn.metrics.f1_score(Y_true[:, i], Y_pred[:, i])
return float(sum(f_list)) / nb_cls
'micro average is the score computed based on the sum of tp,fp,fn'
def micro_f1_score(y_true, y_pred):
nb_ins = len(y_true)
if nb_ins != len(y_pred):
        raise Exception('the size of the prediction result is not consistent with the size of the labels')
bl = sklearn.preprocessing.Binarizer()
Y_true = bl.transform(y_true)
Y_pred = bl.transform(y_pred)
nb_true = sum(sum(Y_true))
nb_pred = sum(sum(Y_pred))
nb_tp = sum(sum(np.array(Y_true == Y_pred, dtype=int)))
return float(2 * nb_tp) / (nb_pred + nb_true)
#msg = False
'''
extend sklearn's LabelBinarizer, aiming to use a different threshold for each class
'''
class LabelBinarizer(sklearn.preprocessing.LabelBinarizer):
def inverse_transform(self, Y, thresholds=None):
self._check_fitted()
if thresholds is None:
half = (self.pos_label - self.neg_label) / 2.0
thresholds = [self.neg_label + half]
if self.multilabel:
for i, threshold in enumerate(thresholds):
Y[:, i] = np.array(Y[:, i] > threshold, dtype=int)
# Return the predictions in the same format as in fit
if self.indicator_matrix_:
# Label indicator matrix format
return Y
else:
# Lists of tuples format
return [tuple(self.classes_[np.flatnonzero(Y[i])])
for i in range(Y.shape[0])]
if len(Y.shape) == 1 or Y.shape[1] == 1:
y = np.array(Y.ravel() > thresholds[0], dtype=int)
else:
y = Y.argmax(axis=1)
return self.classes_[y]
| gpl-2.0 |
ewels/NGI-RNAseq | bin/check_samplesheet.py | 1 | 7375 | #!/usr/bin/env python3
import os
import sys
import errno
import argparse
def parse_args(args=None):
Description = "Reformat nf-core/rnaseq samplesheet file and check its contents."
Epilog = "Example usage: python check_samplesheet.py <FILE_IN> <FILE_OUT>"
parser = argparse.ArgumentParser(description=Description, epilog=Epilog)
parser.add_argument("FILE_IN", help="Input samplesheet file.")
parser.add_argument("FILE_OUT", help="Output file.")
return parser.parse_args(args)
def make_dir(path):
if len(path) > 0:
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise exception
def print_error(error, context="Line", context_str=""):
error_str = f"ERROR: Please check samplesheet -> {error}"
if context != "" and context_str != "":
error_str = f"ERROR: Please check samplesheet -> {error}\n{context.strip()}: '{context_str.strip()}'"
print(error_str)
sys.exit(1)
def check_samplesheet(file_in, file_out):
"""
This function checks that the samplesheet follows the following structure:
sample,fastq_1,fastq_2,strandedness
SAMPLE_PE,SAMPLE_PE_RUN1_1.fastq.gz,SAMPLE_PE_RUN1_2.fastq.gz,forward
SAMPLE_PE,SAMPLE_PE_RUN2_1.fastq.gz,SAMPLE_PE_RUN2_2.fastq.gz,forward
SAMPLE_SE,SAMPLE_SE_RUN1_1.fastq.gz,,forward
For an example see:
https://github.com/nf-core/test-datasets/blob/rnaseq/samplesheet/v3.1/samplesheet_test.csv
"""
sample_mapping_dict = {}
with open(file_in, "r", encoding='utf-8-sig') as fin:
## Check header
MIN_COLS = 3
HEADER = ["sample", "fastq_1", "fastq_2", "strandedness"]
header = [x.strip('"') for x in fin.readline().strip().split(",")]
if header[: len(HEADER)] != HEADER:
print(
f"ERROR: Please check samplesheet header -> {','.join(header)} != {','.join(HEADER)}"
)
sys.exit(1)
## Check sample entries
for line in fin:
if line.strip():
lspl = [x.strip().strip('"') for x in line.strip().split(",")]
## Check valid number of columns per row
if len(lspl) < len(HEADER):
print_error(
f"Invalid number of columns (minimum = {len(HEADER)})!",
"Line",
line,
)
num_cols = len([x for x in lspl if x])
if num_cols < MIN_COLS:
print_error(
f"Invalid number of populated columns (minimum = {MIN_COLS})!",
"Line",
line,
)
## Check sample name entries
sample, fastq_1, fastq_2, strandedness = lspl[: len(HEADER)]
if sample.find(" ") != -1:
print(
f"WARNING: Spaces have been replaced by underscores for sample: {sample}"
)
sample = sample.replace(" ", "_")
if not sample:
print_error("Sample entry has not been specified!", "Line", line)
## Check FastQ file extension
for fastq in [fastq_1, fastq_2]:
if fastq:
if fastq.find(" ") != -1:
print_error("FastQ file contains spaces!", "Line", line)
if not fastq.endswith(".fastq.gz") and not fastq.endswith(".fq.gz"):
print_error(
"FastQ file does not have extension '.fastq.gz' or '.fq.gz'!",
"Line",
line,
)
## Check strandedness
strandednesses = ["unstranded", "forward", "reverse"]
if strandedness:
if strandedness not in strandednesses:
print_error(
f"Strandedness must be one of '{', '.join(strandednesses)}'!",
"Line",
line,
)
else:
print_error(
f"Strandedness has not been specified! Must be one of {', '.join(strandednesses)}.",
"Line",
line,
)
## Auto-detect paired-end/single-end
sample_info = [] ## [single_end, fastq_1, fastq_2, strandedness]
if sample and fastq_1 and fastq_2: ## Paired-end short reads
sample_info = ["0", fastq_1, fastq_2, strandedness]
elif sample and fastq_1 and not fastq_2: ## Single-end short reads
sample_info = ["1", fastq_1, fastq_2, strandedness]
else:
print_error("Invalid combination of columns provided!", "Line", line)
## Create sample mapping dictionary = {sample: [[ single_end, fastq_1, fastq_2, strandedness ]]}
if sample not in sample_mapping_dict:
sample_mapping_dict[sample] = [sample_info]
else:
if sample_info in sample_mapping_dict[sample]:
print_error("Samplesheet contains duplicate rows!", "Line", line)
else:
sample_mapping_dict[sample].append(sample_info)
## Write validated samplesheet with appropriate columns
if len(sample_mapping_dict) > 0:
out_dir = os.path.dirname(file_out)
make_dir(out_dir)
with open(file_out, "w") as fout:
fout.write(
",".join(["sample", "single_end", "fastq_1", "fastq_2", "strandedness"])
+ "\n"
)
for sample in sorted(sample_mapping_dict.keys()):
## Check that multiple runs of the same sample are of the same datatype i.e. single-end / paired-end
if not all(
x[0] == sample_mapping_dict[sample][0][0]
for x in sample_mapping_dict[sample]
):
print_error(
f"Multiple runs of a sample must be of the same datatype i.e. single-end or paired-end!",
"Sample",
sample,
)
## Check that multiple runs of the same sample are of the same strandedness
if not all(
x[-1] == sample_mapping_dict[sample][0][-1]
for x in sample_mapping_dict[sample]
):
print_error(
f"Multiple runs of a sample must have the same strandedness!",
"Sample",
sample,
)
for idx, val in enumerate(sample_mapping_dict[sample]):
fout.write(",".join([f"{sample}_T{idx+1}"] + val) + "\n")
else:
print_error(f"No entries to process!", "Samplesheet: {file_in}")
def main(args=None):
args = parse_args(args)
check_samplesheet(args.FILE_IN, args.FILE_OUT)
if __name__ == "__main__":
sys.exit(main())
| mit |
eranchetz/nupic | tests/swarming/nupic/swarming/experiments/field_threshold_temporal/description.py | 32 | 17206 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalNextStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': { u'attendance': { 'clipInput': True,
'fieldname': u'attendance',
'maxval': 36067,
'minval': 0,
'n': 150,
'name': u'attendance',
'type': 'AdaptiveScalarEncoder',
'w': 21},
u'daynight': { 'fieldname': u'daynight',
'n': 300,
'name': u'daynight',
'type': 'SDRCategoryEncoder',
'w': 21},
u'home_winloss': { 'clipInput': True,
'fieldname': u'home_winloss',
'maxval': 0.69999999999999996,
'minval': 0.0,
'n': 150,
'name': u'home_winloss',
'type': 'AdaptiveScalarEncoder',
'w': 21},
u'precip': { 'fieldname': u'precip',
'n': 300,
'name': u'precip',
'type': 'SDRCategoryEncoder',
'w': 21},
u'timestamp_dayOfWeek': { 'dayOfWeek': (7, 1),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'},
u'timestamp_timeOfDay': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (7, 1),
'type': 'DateEncoder'},
u'visitor_winloss': { 'clipInput': True,
'fieldname': u'visitor_winloss',
'maxval': 0.78600000000000003,
'minval': 0.0,
'n': 150,
'name': u'visitor_winloss',
'type': 'AdaptiveScalarEncoder',
'w': 21}},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
        # Valid keys are the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 1.0,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 15,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
#
'dataset' : { u'info': u'baseball benchmark test',
u'streams': [ { u'columns': [ u'daynight',
u'precip',
u'home_winloss',
u'visitor_winloss',
u'attendance',
u'timestamp'],
u'info': u'OAK01.csv',
u'source': u'file://extra/baseball_stadium/OAK01reformatted.csv'}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
#'iterationCount' : ITERATION_COUNT,
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'attendance',
inferenceElement=InferenceElement.prediction,
metric='aae', params={'window': 1000}),
MetricSpec(field=u'attendance',
inferenceElement=InferenceElement.prediction,
metric='trivial_aae', params={'window': 1000}),
MetricSpec(field=u'attendance',
inferenceElement=InferenceElement.encodings,
metric='nupicScore_scalar', params={'frequencyWindow': 1000, 'movingAverageWindow': 1000}),
MetricSpec(field=u'attendance',
inferenceElement=InferenceElement.encodings,
metric='nupicScore_scalar',
params={'frequencyWindow': 1000})
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| agpl-3.0 |
PatrickOReilly/scikit-learn | sklearn/gaussian_process/tests/test_gpr.py | 10 | 11915 | """Testing for Gaussian process regression """
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, ConstantKernel as C, WhiteKernel
from sklearn.utils.testing \
import (assert_true, assert_greater, assert_array_less,
assert_almost_equal, assert_equal)
def f(x):
return x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=1.0), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2)),
C(0.1, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2))]
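# Kernels exercised by the tests below: plain and fixed (non-optimized) RBF
# kernels, and several combinations of a constant amplitude times an RBF,
# with or without a small additive constant term.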
def test_gpr_interpolation():
"""Test the interpolating property for different kernels."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_pred, y_cov = gpr.predict(X, return_cov=True)
assert_true(np.allclose(y_pred, y))
assert_true(np.allclose(np.diag(y_cov), 0.))
def test_lml_improving():
""" Test that hyperparameter-tuning improves log-marginal likelihood. """
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
""" Test that lml of optimized kernel is stored correctly. """
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_equal(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood())
def test_converged_to_local_maximum():
""" Test that we are in local maximum after hyperparameter-optimization."""
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpr.log_marginal_likelihood(gpr.kernel_.theta, True)
assert_true(np.all((np.abs(lml_gradient) < 1e-4) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 0]) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 1])))
def test_solution_inside_bounds():
""" Test that hyperparameter-optimization remains in bounds"""
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
bounds = gpr.kernel_.bounds
max_ = np.finfo(gpr.kernel_.theta.dtype).max
tiny = 1e-10
bounds[~np.isfinite(bounds[:, 1]), 1] = max_
assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny)
assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny)
def test_lml_gradient():
""" Compare analytic and numeric gradient of log marginal likelihood. """
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpr.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_prior():
""" Test that GP prior has mean 0 and identical variances."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel)
y_mean, y_cov = gpr.predict(X, return_cov=True)
assert_almost_equal(y_mean, 0, 5)
if len(gpr.kernel.theta) > 1:
# XXX: quite hacky, works only for current kernels
assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
else:
assert_almost_equal(np.diag(y_cov), 1, 5)
def test_sample_statistics():
""" Test that statistics of samples drawn from GP are correct."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
samples = gpr.sample_y(X2, 300000)
# More digits accuracy would require many more samples
assert_almost_equal(y_mean, np.mean(samples, 1), 1)
assert_almost_equal(np.diag(y_cov) / np.diag(y_cov).max(),
np.var(samples, 1) / np.diag(y_cov).max(), 1)
def test_no_optimizer():
""" Test that kernel parameters are unmodified when optimizer is None."""
kernel = RBF(1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y)
assert_equal(np.exp(gpr.kernel_.theta), 1.0)
def test_predict_cov_vs_std():
""" Test that predicted std.-dev. is consistent with cov's diagonal."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
y_mean, y_std = gpr.predict(X2, return_std=True)
assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std)
def test_anisotropic_kernel():
""" Test that GPR can identify meaningful anisotropic length-scales. """
# We learn a function which varies in one dimension ten-times slower
# than in the other. The corresponding length-scales should differ by at
# least a factor 5
rng = np.random.RandomState(0)
X = rng.uniform(-1, 1, (50, 2))
y = X[:, 0] + 0.1 * X[:, 1]
kernel = RBF([1.0, 1.0])
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_greater(np.exp(gpr.kernel_.theta[1]),
np.exp(gpr.kernel_.theta[0]) * 5)
def test_random_starts():
"""
Test that an increasing number of random-starts of GP fitting only
increases the log marginal likelihood of the chosen theta.
"""
n_samples, n_features = 25, 2
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) \
+ rng.normal(scale=0.1, size=n_samples)
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1.0] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessRegressor(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0,).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert_greater(lml, last_lml - np.finfo(np.float32).eps)
last_lml = lml
def test_y_normalization():
""" Test normalization of the target values in GP
Fitting non-normalizing GP on normalized y and fitting normalizing GP
on unnormalized y should yield identical results
"""
y_mean = y.mean(0)
y_norm = y - y_mean
for kernel in kernels:
# Fit non-normalizing GP on normalized y
gpr = GaussianProcessRegressor(kernel=kernel)
gpr.fit(X, y_norm)
# Fit normalizing GP on unnormalized y
gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_norm.fit(X, y)
# Compare predicted mean, std-devs and covariances
y_pred, y_pred_std = gpr.predict(X2, return_std=True)
y_pred = y_mean + y_pred
y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
assert_almost_equal(y_pred, y_pred_norm)
assert_almost_equal(y_pred_std, y_pred_std_norm)
_, y_cov = gpr.predict(X2, return_cov=True)
_, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
assert_almost_equal(y_cov, y_cov_norm)
def test_y_multioutput():
""" Test that GPR can deal with multi-dimensional target values"""
y_2d = np.vstack((y, y * 2)).T
# Test for fixed kernel that first dimension of 2d GP equals the output
# of 1d GP and that second dimension is twice as large
kernel = RBF(length_scale=1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr_2d.fit(X, y_2d)
y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
_, y_cov_1d = gpr.predict(X2, return_cov=True)
_, y_cov_2d = gpr_2d.predict(X2, return_cov=True)
assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)
# Standard deviation and covariance do not depend on output
assert_almost_equal(y_std_1d, y_std_2d)
assert_almost_equal(y_cov_1d, y_cov_2d)
y_sample_1d = gpr.sample_y(X2, n_samples=10)
y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
assert_almost_equal(y_sample_1d, y_sample_2d[:, 0])
# Test hyperparameter optimization
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_2d.fit(X, np.vstack((y, y)).T)
assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
def test_custom_optimizer():
""" Test that GPR can use externally defined optimizers. """
# Define a dummy optimizer that simply tests 50 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(50):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
gpr.fit(X, y)
# Checks that optimizer improved marginal likelihood
assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood(gpr.kernel.theta))
def test_duplicate_input():
""" Test GPR can handle two different output-values for the same input. """
for kernel in kernels:
gpr_equal_inputs = \
GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
gpr_similar_inputs = \
GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
X_ = np.vstack((X, X[0]))
y_ = np.hstack((y, y[0] + 1))
gpr_equal_inputs.fit(X_, y_)
X_ = np.vstack((X, X[0] + 1e-15))
y_ = np.hstack((y, y[0] + 1))
gpr_similar_inputs.fit(X_, y_)
X_test = np.linspace(0, 10, 100)[:, None]
y_pred_equal, y_std_equal = \
gpr_equal_inputs.predict(X_test, return_std=True)
y_pred_similar, y_std_similar = \
gpr_similar_inputs.predict(X_test, return_std=True)
assert_almost_equal(y_pred_equal, y_pred_similar)
assert_almost_equal(y_std_equal, y_std_similar)
| bsd-3-clause |
achm6174/kaggle-physics-tau | ensemble_weight/ensemble_weight.py | 1 | 4552 | """
@author: achm
Calculate the weights for the final ensemble of strong and weak models via Keras
"""
import numpy as np
import pandas as pd
from sklearn.manifold import TSNE
from keras.models import Sequential
from sklearn.preprocessing import StandardScaler
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.advanced_activations import PReLU
from keras.layers.convolutional import Convolution1D
from keras.models import Sequential
from keras.utils import np_utils
from keras.layers.embeddings import Embedding
import xgboost as xgb
import sys
import cPickle
import copy
import glob
# Load data
print("Load the training/test data using pandas")
training = pd.read_csv("../input/training.csv")
training['ensemble_weight'] = 1
training.drop('min_ANNmuon', axis=1, inplace=True)
training.drop('mass', axis=1, inplace=True)
training.drop('production', axis=1, inplace=True)
training.drop('signal', axis=1, inplace=True)
param_epoch = 300
for i in range(0,5):
print "### %i ###" %i
try:
fh = open("./model/keras_%i_epoch_%i" %(i,param_epoch), "rb")
deep_model = cPickle.load(fh)
fh.close()
except:
scaler = StandardScaler()
np.random.seed(6174)
print "No prebuild model..."
testing = pd.read_csv("./input/testing_%i.csv" %i)
testing['ensemble_weight'] = 0
#scaler = StandardScaler()
result = pd.concat([training, testing])
y = result["ensemble_weight"]
# Drop Unnesscary features
result.drop('ensemble_weight', axis=1, inplace=True)
result.drop('id', axis=1, inplace=True)
deep_model = copy.deepcopy(Sequential())
deep_model.add(Dense(result.shape[1], 512, init = "glorot_normal"))
deep_model.add(Activation('tanh'))
deep_model.add(Dropout(0.5))
deep_model.add(Dense(512, 256, init = "glorot_normal"))
deep_model.add(Activation('relu'))
deep_model.add(Dropout(0.4))
deep_model.add(Dense(256, 128, init = "glorot_normal"))
deep_model.add(Activation('tanh'))
deep_model.add(Dropout(0.3))
deep_model.add(Dense(128, 64, init = "glorot_normal"))
deep_model.add(Activation('relu'))
deep_model.add(Dropout(0.2))
deep_model.add(Dense(64, 2, init = "glorot_normal"))
deep_model.add(Activation('softmax'))
deep_model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
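        # Note: Dense(n_in, n_out, init=...) is the Keras 0.x layer signature
        # used throughout this script; modern Keras layers take only the
        # output width (e.g. Dense(512)) and infer the input size.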
deep_model.fit(scaler.fit_transform(np.array(result)), np_utils.to_categorical(y),
batch_size=256, nb_epoch=param_epoch, verbose=2, show_accuracy=True)
# save model
temp_file_name = "./model/keras_%i_epoch_%i" %(i,param_epoch)
fh = open(temp_file_name, "wb")
cPickle.dump(deep_model,fh)
fh.close()
# save scalar
temp_file_name = "./model/keras_scalar_%i_epoch_%i" %(i,param_epoch)
fh = open(temp_file_name, "wb")
cPickle.dump(scaler,fh)
fh.close()
fh = open("./model/keras_scalar_%i_epoch_%i" %(i,param_epoch), "rb")
scaler = cPickle.load(fh)
fh.close()
# Make Prediction
testing_eval = pd.read_csv("./input/testing_eval_%i.csv" %i)
#################### FIX #########################
ids = testing_eval['id']
testing_eval.drop('id', axis=1, inplace=True)
##################################################
ensemble_weight = deep_model.predict(scaler.transform(testing_eval), batch_size=256)[:, 1]
# Generate ensemble weight
with open('./output/ensemble_weight_%i_epoch_%i.csv' %(i,param_epoch), 'w') as f:
f.write('id,weight\n')
for ID, p in zip(ids, ensemble_weight):
f.write('%s,%.8f\n' % (ID, p))
# Combine
print("Load ensemble weighting")
ensemble_weight_0 = pd.read_csv("./output/ensemble_weight_0_epoch_%i.csv" %param_epoch)
ensemble_weight_1 = pd.read_csv("./output/ensemble_weight_1_epoch_%i.csv" %param_epoch)
ensemble_weight_2 = pd.read_csv("./output/ensemble_weight_2_epoch_%i.csv" %param_epoch)
ensemble_weight_3 = pd.read_csv("./output/ensemble_weight_3_epoch_%i.csv" %param_epoch)
ensemble_weight_4 = pd.read_csv("./output/ensemble_weight_4_epoch_%i.csv" %param_epoch)
ensemble_weight = pd.concat([ensemble_weight_0, ensemble_weight_1, ensemble_weight_2, ensemble_weight_3, ensemble_weight_4])
ensemble_weight.to_csv("./output/ensemble_weight_epoch_%i.csv" %param_epoch, index=False)
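# The weighting above is an "adversarial validation" scheme: a classifier is
# trained to separate training rows (label 1) from test rows (label 0), and its
# predicted probability of "training-like" on each evaluation row becomes the
# ensemble weight. A minimal sketch of the same idea with scikit-learn follows;
# the helper name and the use of LogisticRegression are illustrative
# assumptions only, and the function is never called by this script.
def _adversarial_validation_sketch(train_df, test_df):
    import pandas as pd
    from sklearn.linear_model import LogisticRegression
    from sklearn.preprocessing import StandardScaler
    # Assumes both frames hold the same numeric feature columns.
    X = StandardScaler().fit_transform(pd.concat([train_df, test_df]))
    y = [1] * len(train_df) + [0] * len(test_df)
    clf = LogisticRegression().fit(X, y)
    # Probability that each test row "looks like" a training row.
    return clf.predict_proba(X[len(train_df):])[:, 1]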
| mit |
theoryno3/scikit-learn | sklearn/cross_decomposition/pls_.py | 14 | 28526 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <edouard.duchesnay@cea.fr>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
    similar to the Power method for determining the eigenvectors and
    eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights))
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
## 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights))
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / np.dot(y_weights.T, y_weights)
## y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
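# At convergence of the loop above (mode A), x_weights satisfies the
# power-iteration recurrence u <- X'Y Y'X u (up to normalization), so it
# converges to the dominant eigenvector of X'Y Y'X, i.e. the first left
# singular vector of X'Y, matching _svd_cross_product up to numerical error.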
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
    This class implements the generic PLS algorithm; the constructor's
    parameters allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
    (i) The outer loop iterates over components.
    (ii) The inner loop estimates the weights vectors. This can be done
    with two algorithms: (a) the inner loop of the original NIPALS algorithm,
    or (b) an SVD of the residual cross-covariance matrices.
    Parameters
    ----------
    n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
            Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
        # copy since this will contain the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
#1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
#2) Deflation (in place)
# ----------------------
            # A possible memory footprint reduction may be done here: in order
            # to avoid the allocation of a data chunk for the rank-one
            # approximations matrix which is then subtracted from Xk, we suggest
# to perform a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
        return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
    For each component k, find weights u, v that optimize:
    ``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
    This implementation provides the same results as 3 PLS packages
    available in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French, but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
@property
def coefs(self):
check_is_fitted(self, 'coef_')
        warnings.warn("``coefs`` attribute has been deprecated and will be "
                      "removed in version 0.17. Use ``coef_`` instead",
                      DeprecationWarning)
return self.coef_
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
        max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
regression. But slightly different than the CCA. This is mostly used
for modeling.
    This implementation provides the same results as the "plspm" package
    available in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    lies in the fact that the mixOmics implementation does not exactly implement
    the Wold algorithm, since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
    Simply performs an SVD on the cross-covariance matrix X'Y.
    There is no iterative deflation here.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
        # copy since this will contain the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d with "
"X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
        # let's use arpack to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
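# A minimal PLSSVD usage sketch (the values below are chosen purely for
# illustration):
#
#     >>> import numpy as np
#     >>> from sklearn.cross_decomposition import PLSSVD
#     >>> X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
#     >>> Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
#     >>> plssvd = PLSSVD(n_components=2).fit(X, Y)
#     >>> X_scores, Y_scores = plssvd.transform(X, Y)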
| bsd-3-clause |
ray-project/ray | rllib/algorithms/qmix/model.py | 1 | 1580 | from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.preprocessors import get_preprocessor
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_torch
torch, nn = try_import_torch()
class RNNModel(TorchModelV2, nn.Module):
"""The default RNN model for QMIX."""
def __init__(self, obs_space, action_space, num_outputs, model_config, name):
TorchModelV2.__init__(
self, obs_space, action_space, num_outputs, model_config, name
)
nn.Module.__init__(self)
self.obs_size = _get_size(obs_space)
self.rnn_hidden_dim = model_config["lstm_cell_size"]
self.fc1 = nn.Linear(self.obs_size, self.rnn_hidden_dim)
self.rnn = nn.GRUCell(self.rnn_hidden_dim, self.rnn_hidden_dim)
self.fc2 = nn.Linear(self.rnn_hidden_dim, num_outputs)
self.n_agents = model_config["n_agents"]
@override(ModelV2)
def get_initial_state(self):
# Place hidden states on same device as model.
return [
self.fc1.weight.new(self.n_agents, self.rnn_hidden_dim).zero_().squeeze(0)
]
@override(ModelV2)
def forward(self, input_dict, hidden_state, seq_lens):
x = nn.functional.relu(self.fc1(input_dict["obs_flat"].float()))
h_in = hidden_state[0].reshape(-1, self.rnn_hidden_dim)
h = self.rnn(x, h_in)
q = self.fc2(h)
return q, [h]
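# The ``hidden_state`` argument to ``forward`` is a list holding one tensor of
# GRU hidden vectors of width ``rnn_hidden_dim`` (reshaped internally to
# [-1, rnn_hidden_dim]); ``forward`` returns the per-action Q-values together
# with the updated hidden state, which the caller threads through successive
# timesteps.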
def _get_size(obs_space):
return get_preprocessor(obs_space)(obs_space).size
| apache-2.0 |
PatrickOReilly/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 79 | 5461 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <emmanuelle.gouillart@nsup.org>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x].astype(np.float64)
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36.
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
GEM-benchmark/NL-Augmenter | nlaugmenter/transformations/sentence_additions/transformation.py | 1 | 2096 | from transformers import pipeline, set_seed
from nlaugmenter.interfaces.SentenceOperation import SentenceOperation
from nlaugmenter.tasks.TaskTypes import TaskType
"""
Adds generated sentence into provided sentences or paragraph to create adversarial examples.
"""
class SentenceAdditions(SentenceOperation):
tasks = [
TaskType.TEXT_CLASSIFICATION,
TaskType.TEXT_TO_TEXT_GENERATION,
]
languages = ["en"]
keywords = [
"discourse",
"model-based",
"transformer-based",
"visual",
"possible-meaning-alteration",
"high-generations",
]
heavy = True
def __init__(self, seed=0, max_outputs=1, max_length=75, model="gpt2-xl"):
super().__init__(seed, max_outputs=max_outputs)
self.seed = seed
self.model = model
self.max_outputs = max_outputs
self.max_length = max_length
def generate(
self, sentence: str, prompt_text=" PARAPHRASE: ", prompt=False
):
perturbed = self.sentence_additions(
text=sentence, prompt_text=prompt_text, prompt=prompt
)
return perturbed
def sentence_additions(self, text, prompt_text, prompt):
set_seed(self.seed)
generator = pipeline("text-generation", model=self.model)
if prompt:
text = text + prompt_text
outputs = generator(
text,
max_length=self.max_length,
num_return_sequences=self.max_outputs,
)
perturbed = []
for sents_with_additions in outputs:
for key, value in sents_with_additions.items():
perturbed.append(value)
return perturbed
# For testing outputs
if __name__ == "__main__":
sentence_addition = SentenceAdditions()
text = "Trinity Medical Imaging is one of the foremost providers of private nuclear medicine imaging in London and Surrey. We work with the finest nuclear medicine consultants from a wide variety of specialist fields."
new_text = sentence_addition.generate(text, prompt=True)
print(new_text)
| mit |
cainiaocome/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 330 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
giorgiop/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 330 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
ray-project/ray | rllib/tests/test_reproducibility.py | 1 | 2555 | import gym
import numpy as np
import unittest
import ray
from ray.rllib.algorithms.dqn import DQNConfig
from ray.rllib.utils.test_utils import framework_iterator
from ray.tune.registry import register_env
class TestReproducibility(unittest.TestCase):
def test_reproducing_trajectory(self):
class PickLargest(gym.Env):
def __init__(self):
self.observation_space = gym.spaces.Box(
low=float("-inf"), high=float("inf"), shape=(4,)
)
self.action_space = gym.spaces.Discrete(4)
def reset(self, **kwargs):
self.obs = np.random.randn(4)
return self.obs
def step(self, action):
reward = self.obs[action]
return self.obs, reward, True, {}
def env_creator(env_config):
return PickLargest()
for fw in framework_iterator(frameworks=("tf", "torch")):
trajs = list()
for trial in range(3):
ray.init()
register_env("PickLargest", env_creator)
config = (
DQNConfig()
.environment("PickLargest")
.debugging(seed=666 if trial in [0, 1] else 999)
.reporting(
min_time_s_per_iteration=0,
min_sample_timesteps_per_iteration=100,
)
.framework(fw)
)
algo = config.build()
trajectory = list()
for _ in range(8):
r = algo.train()
trajectory.append(r["episode_reward_max"])
trajectory.append(r["episode_reward_min"])
trajs.append(trajectory)
algo.stop()
ray.shutdown()
# trial0 and trial1 use same seed and thus
# expect identical trajectories.
all_same = True
for v0, v1 in zip(trajs[0], trajs[1]):
if v0 != v1:
all_same = False
self.assertTrue(all_same)
# trial1 and trial2 use different seeds and thus
# most rewards tend to be different.
diff_cnt = 0
for v1, v2 in zip(trajs[1], trajs[2]):
if v1 != v2:
diff_cnt += 1
self.assertTrue(diff_cnt > 8)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| apache-2.0 |
totalgood/pug-data | docs/conf.py | 1 | 8699 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- Hack for ReadTheDocs ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
import inspect
from sphinx import apidoc
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
output_dir = os.path.join(__location__, "../docs/api")
module_dir = os.path.join(__location__, "../pug_data")
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
apidoc.main(cmd_line.split(" "))
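    # With this repository layout the call expands to roughly:
    #   sphinx-apidoc -f -o <repo>/docs/api <repo>/pug_data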
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pug-data'
copyright = u'2016, Hobson Lane'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from pug_data import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pug-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'pug-data Documentation',
u'Hobson Lane', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://sphinx.pocoo.org', None),
'python': ('http://docs.python.org/' + python_version, None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| mit |
ray-project/ray | python/ray/train/huggingface/huggingface_checkpoint.py | 1 | 3985 | import os
from typing import TYPE_CHECKING, Type, Optional, Union
import torch
import transformers
import transformers.modeling_utils
import transformers.trainer
import transformers.training_args
from transformers.trainer import TRAINING_ARGS_NAME, WEIGHTS_NAME
from ray.air._internal.checkpointing import save_preprocessor_to_dir
from ray.air._internal.torch_utils import load_torch_model
from ray.air.checkpoint import Checkpoint
from ray.util.annotations import PublicAPI
if TYPE_CHECKING:
from ray.data.preprocessor import Preprocessor
@PublicAPI(stability="alpha")
class HuggingFaceCheckpoint(Checkpoint):
"""A :py:class:`~ray.air.checkpoint.Checkpoint` with HuggingFace-specific
functionality.
Create this from a generic :py:class:`~ray.air.checkpoint.Checkpoint` by calling
``HuggingFaceCheckpoint.from_checkpoint(ckpt)``
"""
@classmethod
def from_model(
cls,
model: Union[transformers.modeling_utils.PreTrainedModel, torch.nn.Module],
tokenizer: Optional[transformers.PreTrainedTokenizer] = None,
*,
path: os.PathLike,
preprocessor: Optional["Preprocessor"] = None,
) -> "HuggingFaceCheckpoint":
"""Create a :py:class:`~ray.air.checkpoint.Checkpoint` that stores a
HuggingFace model.
Args:
model: The pretrained transformer or Torch model to store in the
checkpoint.
tokenizer: The Tokenizer to use in the Transformers pipeline for inference.
path: The directory where the checkpoint will be stored.
preprocessor: A fitted preprocessor to be applied before inference.
Returns:
A :py:class:`HuggingFaceCheckpoint` containing the specified model.
"""
if not isinstance(model, transformers.modeling_utils.PreTrainedModel):
state_dict = model.state_dict()
torch.save(state_dict, os.path.join(path, WEIGHTS_NAME))
else:
model.save_pretrained(path)
if tokenizer:
tokenizer.save_pretrained(path)
if preprocessor:
save_preprocessor_to_dir(preprocessor, path)
checkpoint = cls.from_directory(path)
return checkpoint
def get_model(
self,
model: Union[
Type[transformers.modeling_utils.PreTrainedModel], torch.nn.Module
],
**pretrained_model_kwargs,
) -> Union[transformers.modeling_utils.PreTrainedModel, torch.nn.Module]:
"""Retrieve the model stored in this checkpoint."""
with self.as_directory() as checkpoint_path:
if isinstance(model, torch.nn.Module):
state_dict = torch.load(
os.path.join(checkpoint_path, WEIGHTS_NAME), map_location="cpu"
)
model = load_torch_model(saved_model=state_dict, model_definition=model)
else:
model = model.from_pretrained(
checkpoint_path, **pretrained_model_kwargs
)
return model
def get_tokenizer(
self,
tokenizer: Type[transformers.PreTrainedTokenizer],
**kwargs,
) -> Optional[transformers.PreTrainedTokenizer]:
"""Create a tokenizer using the data stored in this checkpoint."""
with self.as_directory() as checkpoint_path:
return tokenizer.from_pretrained(checkpoint_path, **kwargs)
def get_training_arguments(self) -> transformers.training_args.TrainingArguments:
"""Retrieve the training arguments stored in this checkpoint."""
with self.as_directory() as checkpoint_path:
training_args_path = os.path.join(checkpoint_path, TRAINING_ARGS_NAME)
if os.path.exists(training_args_path):
with open(training_args_path, "rb") as f:
training_args = torch.load(f, map_location="cpu")
else:
training_args = None
return training_args
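# A minimal round-trip sketch (the model, tokenizer and directory names below
# are illustrative, not part of this module):
#
#     ckpt = HuggingFaceCheckpoint.from_model(model, tokenizer, path="ckpt_dir")
#     model = ckpt.get_model(transformers.AutoModelForSequenceClassification)
#     tokenizer = ckpt.get_tokenizer(transformers.AutoTokenizer)
#     training_args = ckpt.get_training_arguments()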
| apache-2.0 |
ray-project/ray | python/ray/train/torch/torch_trainer.py | 1 | 8378 | from typing import TYPE_CHECKING, Callable, Dict, Optional, Union
from ray.air.checkpoint import Checkpoint
from ray.air.config import DatasetConfig, RunConfig, ScalingConfig
from ray.train.data_parallel_trainer import DataParallelTrainer
from ray.train.torch.config import TorchConfig
from ray.train.trainer import GenDataset
from ray.util import PublicAPI
if TYPE_CHECKING:
from ray.data.preprocessor import Preprocessor
@PublicAPI(stability="beta")
class TorchTrainer(DataParallelTrainer):
"""A Trainer for data parallel PyTorch training.
This Trainer runs the function ``train_loop_per_worker`` on multiple Ray
Actors. These actors already have the necessary torch process group
configured for distributed PyTorch training.
The ``train_loop_per_worker`` function is expected to take in either 0 or 1
arguments:
.. code-block:: python
def train_loop_per_worker():
...
.. code-block:: python
def train_loop_per_worker(config: Dict):
...
If ``train_loop_per_worker`` accepts an argument, then
``train_loop_config`` will be passed in as the argument. This is useful if you
want to tune the values in ``train_loop_config`` as hyperparameters.
If the ``datasets`` dict contains a training dataset (denoted by
the "train" key), then it will be split into multiple dataset
shards that can then be accessed by ``session.get_dataset_shard("train")`` inside
``train_loop_per_worker``. All the other datasets will not be split and
``session.get_dataset_shard(...)`` will return the entire Dataset.
Inside the ``train_loop_per_worker`` function, you can use any of the
:ref:`Ray AIR session methods <air-session-ref>`.
.. code-block:: python
def train_loop_per_worker():
# Report intermediate results for callbacks or logging and
# checkpoint data.
session.report(...)
# Returns dict of last saved checkpoint.
session.get_checkpoint()
# Returns the Ray Dataset shard for the given key.
session.get_dataset_shard("my_dataset")
# Returns the total number of workers executing training.
session.get_world_size()
# Returns the rank of this worker.
session.get_world_rank()
# Returns the rank of the worker on the current node.
session.get_local_rank()
You can also use any of the Torch specific function utils,
such as :func:`ray.train.torch.get_device` and :func:`ray.train.torch.prepare_model`
.. code-block:: python
def train_loop_per_worker():
# Prepares model for distributed training by wrapping in
# `DistributedDataParallel` and moving to correct device.
train.torch.prepare_model(...)
# Configures the dataloader for distributed training by adding a
# `DistributedSampler`.
# You should NOT use this if you are doing
# `session.get_dataset_shard(...).iter_torch_batches(...)`
train.torch.prepare_data_loader(...)
# Returns the current torch device.
train.torch.get_device()
Any returns from the ``train_loop_per_worker`` will be discarded and not
used or persisted anywhere.
To save a model to use for the ``TorchPredictor``, you must save it under the
"model" kwarg in ``Checkpoint`` passed to ``session.report()``.
Example:
.. code-block:: python
import torch
import torch.nn as nn
import ray
from ray import train
from ray.air import session, Checkpoint
from ray.train.torch import TorchTrainer
from ray.air.config import ScalingConfig
input_size = 1
layer_size = 15
output_size = 1
num_epochs = 3
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
self.layer1 = nn.Linear(input_size, layer_size)
self.relu = nn.ReLU()
self.layer2 = nn.Linear(layer_size, output_size)
def forward(self, input):
return self.layer2(self.relu(self.layer1(input)))
def train_loop_per_worker():
dataset_shard = session.get_dataset_shard("train")
model = NeuralNetwork()
loss_fn = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model = train.torch.prepare_model(model)
for epoch in range(num_epochs):
for batches in dataset_shard.iter_torch_batches(
batch_size=32, dtypes=torch.float
):
inputs, labels = torch.unsqueeze(batches["x"], 1), batches["y"]
output = model(inputs)
loss = loss_fn(output, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(f"epoch: {epoch}, loss: {loss.item()}")
session.report(
{},
checkpoint=Checkpoint.from_dict(
dict(epoch=epoch, model=model.state_dict())
),
)
train_dataset = ray.data.from_items(
[{"x": x, "y": 2 * x + 1} for x in range(200)]
)
scaling_config = ScalingConfig(num_workers=3)
# If using GPUs, use the below scaling config instead.
# scaling_config = ScalingConfig(num_workers=3, use_gpu=True)
trainer = TorchTrainer(
train_loop_per_worker=train_loop_per_worker,
scaling_config=scaling_config,
datasets={"train": train_dataset})
result = trainer.fit()
Args:
train_loop_per_worker: The training function to execute.
This can either take in no arguments or a ``config`` dict.
train_loop_config: Configurations to pass into
``train_loop_per_worker`` if it accepts an argument.
torch_config: Configuration for setting up the PyTorch backend. If set to
None, use the default configuration. This replaces the ``backend_config``
arg of ``DataParallelTrainer``.
scaling_config: Configuration for how to scale data parallel training.
dataset_config: Configuration for dataset ingest.
run_config: Configuration for the execution of the training run.
datasets: Any Ray Datasets to use for training. Use
the key "train" to denote which dataset is the training
dataset. If a ``preprocessor`` is provided and has not already been fit,
it will be fit on the training dataset. All datasets will be transformed
by the ``preprocessor`` if one is provided.
preprocessor: A ``ray.data.Preprocessor`` to preprocess the
provided datasets.
resume_from_checkpoint: A checkpoint to resume training from.
"""
def __init__(
self,
train_loop_per_worker: Union[Callable[[], None], Callable[[Dict], None]],
*,
train_loop_config: Optional[Dict] = None,
torch_config: Optional[TorchConfig] = None,
scaling_config: Optional[ScalingConfig] = None,
dataset_config: Optional[Dict[str, DatasetConfig]] = None,
run_config: Optional[RunConfig] = None,
datasets: Optional[Dict[str, GenDataset]] = None,
preprocessor: Optional["Preprocessor"] = None,
resume_from_checkpoint: Optional[Checkpoint] = None,
):
if not torch_config:
torch_config = TorchConfig()
super(TorchTrainer, self).__init__(
train_loop_per_worker=train_loop_per_worker,
train_loop_config=train_loop_config,
backend_config=torch_config,
scaling_config=scaling_config,
dataset_config=dataset_config,
run_config=run_config,
datasets=datasets,
preprocessor=preprocessor,
resume_from_checkpoint=resume_from_checkpoint,
)
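# --- Added follow-up sketch (not part of the original module) ---
# Hedged illustration of inspecting the ``Result`` returned by
# ``trainer.fit()`` in the docstring example above; attribute names follow
# the public Ray AIR ``Result`` API at the time of writing and may differ
# across versions.
def _torch_trainer_result_sketch(result):
    # Metrics from the last ``session.report()`` call, plus bookkeeping keys
    # that Ray adds automatically.
    metrics = result.metrics
    # Last checkpoint reported from the workers; dict checkpoints can be read
    # back with ``to_dict()``. The stored state_dict may carry a "module."
    # prefix if the model was wrapped by DistributedDataParallel.
    state = result.checkpoint.to_dict()
    return metrics, state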
| apache-2.0 |
yyjiang/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 126 | 37672 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset.
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate(
(None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
err_msg='pred_%d doesnt match last pred_%d for loss %r and subsample %r. '
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
# Check consistency on dataset iris.
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict(rng.rand(2))
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict(rng.rand(2)))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test that accessing oob_improvement_ raises when subsample=1.0 (no OOB).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one line per iteration for the first 10 iterations, then one every 10th
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
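# --- Added illustration (not part of the original tests) ---
# A monitor callback receives (iteration index, estimator, locals()) and
# returns True to stop training, as ``early_stopping_monitor`` above shows.
# This hedged sketch stops once the training loss has not improved for
# ``patience`` consecutive stages; names and thresholds are illustrative.
def make_patience_monitor(patience=5, tol=1e-4):
    state = {"best": np.inf, "stale": 0}
    def monitor(i, est, locals):
        score = est.train_score_[i]
        if score < state["best"] - tol:
            state["best"], state["stale"] = score, 0
        else:
            state["stale"] += 1
        return state["stale"] >= patience
    return monitor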
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
# Regression test for issue #4447
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(loss=loss, n_estimators=5)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
| bsd-3-clause |
giorgiop/scikit-learn | sklearn/datasets/base.py | 8 | 26095 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import csv
import sys
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os.path import splitext
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
super(Bunch, self).__init__(kwargs)
def __setattr__(self, key, value):
self[key] = value
def __dir__(self):
return self.keys()
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __setstate__(self, state):
# Bunch pickles generated with scikit-learn 0.16.* have a non-empty
# __dict__. This causes surprising behaviour when loading these
# pickles in scikit-learn 0.17: reading bunch.key uses __dict__ but
# assigning to bunch.key uses __setattr__ and only changes
# bunch['key']. More details can be found at:
# https://github.com/scikit-learn/scikit-learn/issues/6196.
# Overriding __setstate__ to be a noop has the effect of
# ignoring the pickled __dict__
pass
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
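# --- Added usage sketch (not part of the original module) ---
# Hedged example of resolving the cache directory; the explicit path below
# is an illustrative placeholder.
def _data_home_usage_sketch():
    default_home = get_data_home()                       # e.g. ~/scikit_learn_data, expanded
    explicit_home = get_data_home('/tmp/sklearn_cache')  # created if it does not exist
    return default_home, explicit_home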
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
Individual samples are assumed to be files stored in a two-level folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kinds of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether or not to load the content of the different files. If
true, a 'data' attribute containing the text information is present
in the returned data structure. If not, a 'filenames' attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
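# --- Added usage sketch (not part of the original module) ---
# Hedged example of loading a text corpus laid out as described in the
# docstring above; '/path/to/corpus' is an illustrative placeholder.
def _load_files_usage_sketch(container_path='/path/to/corpus'):
    corpus = load_files(container_path, encoding='utf-8',
                        decode_error='replace')
    # corpus.data: list of decoded documents
    # corpus.target: integer label per document
    # corpus.target_names: subfolder names, in sorted order
    return corpus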
def load_iris(return_X_y=False):
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float64)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
if return_X_y:
return data, target
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_breast_cancer(return_X_y=False):
"""Load and return the breast cancer wisconsin dataset (classification).
The breast cancer dataset is a classic and very easy binary classification
dataset.
================= ==============
Classes 2
Samples per class 212(M),357(B)
Samples total 569
Dimensionality 30
Features real, positive
================= ==============
Parameters
----------
return_X_y : boolean, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is
downloaded from:
https://goo.gl/U2Uwz2
Examples
--------
Let's say you are interested in the samples 10, 50, and 85, and want to
know their class name.
>>> from sklearn.datasets import load_breast_cancer
>>> data = load_breast_cancer()
>>> data.target[[10, 50, 85]]
array([0, 1, 0])
>>> list(data.target_names)
['malignant', 'benign']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'breast_cancer.csv')) as csv_file:
data_file = csv.reader(csv_file)
first_line = next(data_file)
n_samples = int(first_line[0])
n_features = int(first_line[1])
target_names = np.array(first_line[2:4])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for count, value in enumerate(data_file):
data[count] = np.asarray(value[:-1], dtype=np.float64)
target[count] = np.asarray(value[-1], dtype=np.int)
with open(join(module_path, 'descr', 'breast_cancer.rst')) as rst_file:
fdescr = rst_file.read()
feature_names = np.array(['mean radius', 'mean texture',
'mean perimeter', 'mean area',
'mean smoothness', 'mean compactness',
'mean concavity', 'mean concave points',
'mean symmetry', 'mean fractal dimension',
'radius error', 'texture error',
'perimeter error', 'area error',
'smoothness error', 'compactness error',
'concavity error', 'concave points error',
'symmetry error', 'fractal dimension error',
'worst radius', 'worst texture',
'worst perimeter', 'worst area',
'worst smoothness', 'worst compactness',
'worst concavity', 'worst concave points',
'worst symmetry', 'worst fractal dimension'])
if return_X_y:
return data, target
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=feature_names)
def load_digits(n_class=10, return_X_y=False):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import matplotlib.pyplot as plt #doctest: +SKIP
>>> plt.gray() #doctest: +SKIP
>>> plt.matshow(digits.images[0]) #doctest: +SKIP
>>> plt.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1].astype(np.int)
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
if return_X_y:
return flat_data, target
return Bunch(data=flat_data,
target=target,
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes(return_X_y=False):
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
if return_X_y:
return data, target
return Bunch(data=data, target=target,
feature_names=['age', 'sex', 'bmi', 'bp',
's1', 's2', 's3', 's4', 's5', 's6'])
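# --- Added usage sketch (not part of the original module) ---
# Hedged example of the ``return_X_y`` convenience introduced in 0.18.
def _load_diabetes_usage_sketch():
    X, y = load_diabetes(return_X_y=True)
    # X has shape (442, 10); y holds the regression targets for each sample.
    return X.shape, y.shape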
def load_linnerud(return_X_y=False):
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Parameters
----------
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
if return_X_y:
return data_exercise, data_physiological
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston(return_X_y=False):
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Parameters
----------
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float64)
target[i] = np.asarray(d[-1], dtype=np.float64)
if return_X_y:
return data, target
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
def _pkl_filepath(*args, **kwargs):
"""Ensure different filenames for Python 2 and Python 3 pickles
An object pickled under Python 3 cannot be loaded under Python 2.
An object pickled under Python 2 can sometimes not be loaded
correctly under Python 3 because some Python 2 strings are decoded as
Python 3 strings which can be problematic for objects that use Python 2
strings as byte buffers for numerical data instead of "real" strings.
Therefore, dataset loaders in scikit-learn use different files for pickles
managed by Python 2 and Python 3 in the same SCIKIT_LEARN_DATA folder so
as to avoid conflicts.
args[-1] is expected to be the ".pkl" filename. Under Python 3, a
suffix is inserted before the extension, so that
_pkl_filepath('/path/to/folder', 'filename.pkl') returns:
- /path/to/folder/filename.pkl under Python 2
- /path/to/folder/filename_py3.pkl under Python 3+
"""
py3_suffix = kwargs.get("py3_suffix", "_py3")
basename, ext = splitext(args[-1])
if sys.version_info[0] >= 3:
basename += py3_suffix
new_args = args[:-1] + (basename + ext,)
return join(*new_args)
| bsd-3-clause |
cainiaocome/scikit-learn | sklearn/datasets/tests/test_lfw.py | 228 | 7880 | """This test for LFW requires medium-size data downloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is an array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion and no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
GoogleCloudPlatform/cloudml-samples | tpu/templates/tpu_estimator/trainer.py | 1 | 6890 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import tensorflow as tf
# ## The model function
# There are two differences in the model function when using TPUs:
#
# * The optimizer needs to be wrapped in a `tf.contrib.tpu.CrossShardOptimizer`.
#
# * The model function should return a `tf.contrib.tpu.TPUEstimatorSpec`.
#
def model_fn(features, labels, mode, params):
# build model
global_step = tf.train.get_global_step()
hidden = tf.layers.dense(features, 10, activation=tf.nn.relu)
output = tf.layers.dense(hidden, 1)
predictions = output
loss = None
train_op = None
if mode == tf.estimator.ModeKeys.TRAIN:
# define loss
loss = tf.nn.l2_loss(predictions - labels)
# define train_op
optimizer = tf.train.RMSPropOptimizer(learning_rate=0.05)
# wrapper to make the optimizer work with TPUs
if params['use_tpu']:
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
train_op = optimizer.minimize(loss, global_step=global_step)
if params['use_tpu']:
# TPU version of EstimatorSpec
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op)
else:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op)
# ## The input function
# tf.data.Dataset is the best choice for building the input function.
# Even though datasets can determine the shape of the data at *runtime*,
# TPUs need to know the shape of the tensors *when the graph is built*.
# This typically means two things:
#
# * Set `drop_remainder=True` in the `dataset.batch` call.
#
# * Set tensor shapes to make sure the features and labels do not have any unknown dimensions.
#
def train_input_fn(params={}):
# make some fake regression data
x = np.random.rand(100, 5)
w = np.random.rand(5)
y = np.sum(x * w, axis=1)
# TPUs currently do not support float64
x_tensor = tf.constant(x, dtype=tf.float32)
y_tensor = tf.constant(y, dtype=tf.float32)
# create tf.data.Dataset
dataset = tf.data.Dataset.from_tensor_slices((x_tensor, y_tensor))
# TPUEstimator passes params when calling input_fn
batch_size = params.get('batch_size', 16)
dataset = dataset.repeat().shuffle(32).batch(batch_size, drop_remainder=True)
# TPUs need to know all dimensions when the graph is built
# Datasets know the batch size only when the graph is run
def set_shapes(features, labels):
features_shape = features.get_shape().merge_with([batch_size, None])
labels_shape = labels.get_shape().merge_with([batch_size])
features.set_shape(features_shape)
labels.set_shape(labels_shape)
return features, labels
dataset = dataset.map(set_shapes)
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
return dataset
# ## The TPUEstimator
# The TPUEstimator is similar to the usual Estimator, but requires a
# slightly different run_config, since it needs to know where to connect
# to the TPU workers.
#
# This is done through `tf.contrib.cluster_resolver.TPUClusterResolver`,
# which is passed into a `tf.contrib.tpu.TPUConfig`, which in turn is
# passed into `tf.contrib.tpu.RunConfig`.
#
def main(args):
# pass the args as params so the model_fn can use
# the TPU specific args
params = vars(args)
if args.use_tpu:
# additional configs required for using TPUs
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(args.tpu)
tpu_config = tf.contrib.tpu.TPUConfig(
num_shards=8, # using Cloud TPU v2-8
iterations_per_loop=args.save_checkpoints_steps)
# use the TPU version of RunConfig
config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=args.model_dir,
tpu_config=tpu_config,
save_checkpoints_steps=args.save_checkpoints_steps,
save_summary_steps=100)
# TPUEstimator
estimator = tf.contrib.tpu.TPUEstimator(
model_fn=model_fn,
config=config,
params=params,
train_batch_size=args.train_batch_size,
eval_batch_size=32,
export_to_tpu=False)
else:
config = tf.estimator.RunConfig(model_dir=args.model_dir)
estimator = tf.estimator.Estimator(
model_fn,
config=config,
params=params)
estimator.train(train_input_fn, max_steps=args.max_steps)
# ## Training
# Depending on where the training job is run, the `TPUClusterResolver`
# needs different input to access the TPU workers:
#
# * On AI Platform: the input should be `None`
# and the service will handle it.
#
# * On Compute Engine: the input should be the name of TPU you create
# before starting the training job.
#
# * On Colab: the input should be the grpc URI from the environment
# variable `COLAB_TPU_ADDR`; the Colab runtime type should be set to
# TPU for this environment variable to be automatically set.
#
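# A minimal sketch (added for illustration, not part of the original sample)
# of the Colab case described above; `COLAB_TPU_ADDR` holds only host:port,
# so the grpc:// scheme is prepended before it is used as the --tpu value:
#
#   import os
#   tpu_address = 'grpc://' + os.environ['COLAB_TPU_ADDR']
#   # then call main with args.tpu = tpu_address and args.use_tpu = True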
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--model-dir',
type=str,
default='/tmp/tpu-template',
help='Location to write checkpoints and summaries to. Must be a GCS URI when using Cloud TPU.')
parser.add_argument(
'--max-steps',
type=int,
default=1000,
help='The total number of steps to train the model.')
parser.add_argument(
'--train-batch-size',
type=int,
default=16,
help='The training batch size. The training batch is divided evenly across the TPU cores.')
parser.add_argument(
'--save-checkpoints-steps',
type=int,
default=100,
help='The number of training steps before saving each checkpoint.')
parser.add_argument(
'--use-tpu',
action='store_true',
help='Whether to use TPU.')
parser.add_argument(
'--tpu',
default=None,
help='The name or GRPC URL of the TPU node. Leave it as `None` when training on AI Platform.')
args, _ = parser.parse_known_args()
main(args)
| apache-2.0 |
wenhuchen/ETHZ-Bootstrapped-Captioning | attention_generator/capgen.py | 1 | 26939 | '''
Source code for an attention based image caption generation system described
in:
Show, Attend and Tell: Neural Image Caption Generation with Visual Attention
International Conference for Machine Learning (2015)
http://arxiv.org/abs/1502.03044
Comments in square brackets [] indicate references to the equations/
more detailed explanations in the above paper.
'''
import theano
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import numpy
import copy
from collections import OrderedDict
import warnings
from util import ortho_weight, norm_weight, tanh, rectifier, linear, avlist
from util import dropout_layer, _p
from layers import *
##################################################
################ PREPROCESSING ###################
##################################################
import coco
import commoncrawl
import flickr
# datasets: 'name', 'load_data: returns iterator', 'prepare_data: some preprocessing'
datasets = {'coco': (coco.load_data, coco.prepare_data),
'commoncrawl': (commoncrawl.load_data, commoncrawl.prepare_data),
'commoncrawl_addnoise': (commoncrawl.load_data, commoncrawl.prepare_data),
'commoncrawl_larger': (commoncrawl.load_data, commoncrawl.prepare_data),
'commonvisual': (commoncrawl.load_data, commoncrawl.prepare_data),
'flickr': (flickr.load_data, flickr.prepare_data)}
def get_dataset(name):
return datasets[name][0], datasets[name][1]
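# Illustrative usage of the accessor above (a sketch, not part of the
# original module):
#   load_data, prepare_data = get_dataset('coco')
#   # load_data returns the data iterators, prepare_data does the preprocessing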
##################################################
############## NEURAL NETWORK DEF ################
##################################################
"""
Neural network layer definitions.
The life-cycle of each of these layers is as follows
1) The param_init of the layer is called, which creates
the weights of the network.
2) The fprop is called which builds that part of the Theano graph
using the weights created in step 1). This automatically links
these variables to the graph.
Each prefix is used like a key and should be unique
to avoid naming conflicts when building the graph.
"""
#layers: 'name': ('parameter initializer', 'fprop')
layers = {'ff': ('param_init_fflayer', 'fflayer'),
'lstm': ('param_init_lstm', 'lstm_layer'),
'lstm_cond': ('param_init_lstm_cond', 'lstm_cond_layer'),
'lstm_cond_nox':('param_init_lstm_cond_nox', 'lstm_cond_nox_layer')
}
def get_layer(name):
fns = layers[name]
return (eval(fns[0]), eval(fns[1]))
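# Illustrative sketch of the two-step life-cycle described above for the
# feedforward layer (names below are examples only, not part of the module):
#
#   init_ff, ff_fprop = get_layer('ff')
#   params = init_ff(options, params, prefix='example_ff', nin=300, nout=100)
#   # ... after params are turned into theano shared variables (tparams) ...
#   out = ff_fprop(tparams, state_below, options, prefix='example_ff', activ='tanh')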
##################################################
################ INITIALIZATIONS #################
##################################################
# parameter initialization
# [roughly in the same order as presented in section 3.1.2]
# See above get_layer function + layers var for neural network definition
def init_params(options):
params = OrderedDict()
# Visual concept embedding
if not options['with_glove']:
params['VCemb'] = norm_weight(options['n_words'], options['dim_word'])
# embedding: [matrix E in paper]
params['Wemb'] = norm_weight(options['n_words'], options['dim_word'])
#params = get_layer('ff')[0](options, params, prefix='CNNTrans', nin=options['cnn_dim'], nout=options['dim'])
ctx_dim = options['ctx_dim']
if options['lstm_encoder']: # potential feature that runs an LSTM over the annotation vectors
# use input attentive encoder
params = get_layer('lstm_cond_nox')[0](options, params, prefix='encoder', dim=ctx_dim, dimctx=options['semantic_dim'])
# potentially deep decoder (warning: should work but somewhat untested)
for lidx in range(options['n_layers_lstm']):
ff_state_prefix = 'CNNTrans_%d'%lidx if lidx > 0 else 'CNNTrans'
ff_memory_prefix = 'CNN_memory_%d'%lidx if lidx > 0 else 'CNN_memory'
lstm_prefix = 'decoder_%d'%lidx if lidx > 0 else 'decoder'
nin_lstm = options['dim'] if lidx > 0 else options['dim_word']
params = get_layer('ff')[0](options, params, prefix=ff_state_prefix, nin=options['cnn_dim'], nout=options['dim'])
params = get_layer('ff')[0](options, params, prefix=ff_memory_prefix, nin=options['cnn_dim'], nout=options['dim'])
params = get_layer('lstm_cond')[0](options, params, prefix=lstm_prefix,
nin=nin_lstm, dim=options['dim'],
dimctx=ctx_dim)
# readout: [equation (7)]
params = get_layer('ff')[0](options, params, prefix='ff_logit_lstm', nin=options['dim'], nout=options['dim_word'])
if options['ctx2out']:
params = get_layer('ff')[0](options, params, prefix='ff_logit_ctx', nin=ctx_dim, nout=options['dim_word'])
if options['n_layers_out'] > 1:
for lidx in xrange(1, options['n_layers_out']):
params = get_layer('ff')[0](options, params, prefix='ff_logit_h%d'%lidx, nin=options['dim_word'], nout=options['dim_word'])
params = get_layer('ff')[0](options, params, prefix='ff_logit', nin=options['dim_word'], nout=options['n_words'])
return params
# feedforward layer: affine transformation + point-wise nonlinearity
def param_init_fflayer(options, params, prefix='ff', nin=None, nout=None):
if nin is None:
nin = options['dim_proj']
if nout is None:
nout = options['dim_proj']
params[_p(prefix, 'W')] = norm_weight(nin, nout, scale=0.01)
params[_p(prefix, 'b')] = numpy.zeros((nout,)).astype('float32')
return params
##################################################
############### LAYER DEFINITIONS ################
##################################################
def fflayer(tparams, state_below, options, prefix='rconv', activ='lambda x: tensor.tanh(x)', **kwargs):
return eval(activ)(tensor.dot(state_below, tparams[_p(prefix,'W')])+tparams[_p(prefix,'b')])
##################################################
# build a training model
def build_model(tparams, options):
""" Builds the entire computational graph used for training
Basically does a forward pass through the data and calculates the cost function
[This function builds a model described in Section 3.1.2 onwards
as the convolutional feature are precomputed, some extra features
which were not used are also implemented here.]
Parameters
----------
tparams : OrderedDict
maps names of variables to theano shared variables
options : dict
big dictionary with all the settings and hyperparameters
Returns
-------
trng: theano random number generator
Used for dropout, etc
use_noise: theano shared variable
flag that toggles noise on and off
[x, mask, ctx, cnn_features]: theano variables
Represent the captions, binary mask, and annotations
for a single batch (see dimensions below)
alphas: theano variables
Attention weights
alpha_sample: theano variable
Sampled attention weights used in REINFORCE for stochastic
attention: [see the learning rule in eq (12)]
cost: theano variable
negative log likelihood
opt_outs: OrderedDict
extra outputs required depending on configuration in options
"""
trng = RandomStreams(1234)
use_noise = theano.shared(numpy.float32(0.))
# description string: #words x #samples,
x = tensor.matrix('x', dtype='int64')
# mask: #samples,
mask = tensor.matrix('mask', dtype='float32')
# context: #samples x #visual_words x dim
if options['with_glove']:
ctx = tensor.tensor3('ctx', dtype='float32')
new_ctx = ctx
else:
ctx = tensor.matrix('ctx', dtype='int32')
new_ctx = tparams['VCemb'][ctx]
# fc7 features: #samples x dim
cnn_features = tensor.matrix('cnn_feats', dtype='float32')
# index into the word embedding matrix, shift it forward in time, the first element is zero
# Time step x S x D
emb = tparams['Wemb'][x.flatten()].reshape([x.shape[0], x.shape[1], options['dim_word']])
emb_shifted = tensor.zeros_like(emb)
emb_shifted = tensor.set_subtensor(emb_shifted[1:], emb[:-1])
emb = emb_shifted
# forward-backward lstm encoder
if options['lstm_encoder']:
rval, encoder_alphas = get_layer('lstm_cond_nox')[1](tparams, options, prefix='encoder', context=new_ctx)
ctx0 = rval.dimshuffle(1,0,2)
else:
ctx0 = new_ctx
for lidx in range(options['n_layers_lstm']):
init_state_prefix = 'CNNTrans_%d'%lidx if lidx > 0 else 'CNNTrans'
init_memory_prefix = 'CNN_memory_%d'%lidx if lidx > 0 else 'CNN_memory'
lstm_prefix = 'decoder_%d'%lidx if lidx > 0 else 'decoder'
lstm_inps = proj_h if lidx > 0 else emb
init_state = get_layer('ff')[1](tparams, cnn_features, options, prefix=init_state_prefix, activ='tanh')
init_memory = get_layer('ff')[1](tparams, cnn_features, options, prefix=init_memory_prefix, activ='tanh')
attn_updates = []
proj, updates = get_layer('lstm_cond')[1](tparams, lstm_inps, options,
prefix=lstm_prefix,
mask=mask, context=ctx0,
one_step=False,
init_state=init_state,
init_memory=init_memory,
trng=trng,
use_noise=use_noise)
attn_updates += updates
proj_h = proj[0]
alphas = proj[2]
ctxs = proj[4]
if options['use_dropout']:
proj_h = dropout_layer(proj_h, use_noise, trng)
# compute word probabilities
# [equation (7)]
logit = get_layer('ff')[1](tparams, proj_h, options, prefix='ff_logit_lstm', activ='linear')
if options['prev2out']:
logit += emb
if options['ctx2out']:
logit += get_layer('ff')[1](tparams, ctxs, options, prefix='ff_logit_ctx', activ='linear')
logit = tanh(logit)
if options['use_dropout']:
logit = dropout_layer(logit, use_noise, trng)
if options['n_layers_out'] > 1:
for lidx in xrange(1, options['n_layers_out']):
logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit_h%d'%lidx, activ='rectifier')
if options['use_dropout']:
logit = dropout_layer(logit, use_noise, trng)
# compute softmax
logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit', activ='linear')
logit_shp = logit.shape
probs = tensor.nnet.softmax(logit.reshape([logit_shp[0]*logit_shp[1], logit_shp[2]]))
# Index into the computed probability to give the log likelihood
x_flat = x.flatten()
p_flat = probs.flatten()
cost = -tensor.log(p_flat[tensor.arange(x_flat.shape[0])*probs.shape[1]+x_flat]+1e-8)
cost = cost.reshape([x.shape[0], x.shape[1]])
masked_cost = cost * mask
#align_cost = (-standard_aligns*alphas).sum(2)
cost = masked_cost.sum(0)
# optional outputs
opt_outs = dict()
if options['lstm_encoder']:
return trng, use_noise, [x, mask, ctx, cnn_features], [alphas, encoder_alphas], cost, opt_outs
else:
return trng, use_noise, [x, mask, ctx, cnn_features], [alphas], cost, opt_outs
# build a sampler
def build_sampler(tparams, options, use_noise, trng):
""" Builds a sampler used for generating from the model
Parameters
----------
tparams : OrderedDict
maps names of variables to theano shared variables
options : dict
big dictionary with all the settings and hyperparameters
use_noise: boolean
If true, add noise to the sampling
trng: random number generator
Returns
-------
f_init : theano function
Input: annotation, Output: initial lstm state and memory
(also performs transformation on ctx0 if using lstm_encoder)
f_next: theano function
Takes the previous word/state/memory + ctx0 and runs one
step through the lstm (used for beam search)
"""
# context: #annotations x dim
if options['with_glove']:
ctx = tensor.matrix('ctx_sampler', dtype='float32')
new_ctx = ctx
else:
ctx = tensor.vector('ctx_sampler', dtype='int32')
new_ctx = tparams['VCemb'][ctx]
if options['lstm_encoder']:
ctx0, _ = get_layer('lstm_cond_nox')[1](tparams, options, prefix='encoder', context=new_ctx)
else:
ctx0 = new_ctx
# initial state/cell
cnn_features = tensor.vector('x_feats', dtype='float32')
init_state, init_memory = [], []
for lidx in range(options['n_layers_lstm']):
init_state_prefix = 'CNNTrans_%d'%lidx if lidx > 0 else 'CNNTrans'
init_memory_prefix = 'CNN_memory_%d'%lidx if lidx > 0 else 'CNN_memory'
init_state.append(get_layer('ff')[1](tparams, cnn_features, options, prefix=init_state_prefix, activ='tanh'))
init_memory.append(get_layer('ff')[1](tparams, cnn_features, options, prefix=init_memory_prefix, activ='tanh'))
print 'Building f_init...',
f_init = theano.function([ctx, cnn_features], [ctx0]+init_state+init_memory, name='f_init', profile=False, allow_input_downcast=True)
print 'Done'
# build f_next
x = tensor.vector('x_sampler', dtype='int64')
init_state = []
init_memory = []
for lidx in range(options['n_layers_lstm']):
init_state.append(tensor.matrix('init_state', dtype='float32'))
init_memory.append(tensor.matrix('init_memory', dtype='float32'))
# for the first word (which is coded with -1), emb should be all zero
emb = tensor.switch(x[:,None] < 0,
tensor.alloc(0., 1, tparams['Wemb'].shape[1]),
tparams['Wemb'][x])
next_state, next_memory, ctxs = [], [], []
for lidx in range(options['n_layers_lstm']):
decoder_prefix = 'decoder_%d'%lidx if lidx > 0 else 'decoder'
inps = proj_h if lidx > 0 else emb
proj = get_layer('lstm_cond')[1](tparams, inps, options,
prefix=decoder_prefix,
context=ctx0,
one_step=True,
init_state=init_state[lidx],
init_memory=init_memory[lidx],
trng=trng,
use_noise=use_noise)
next_state.append(proj[0])
next_memory.append(proj[1])
ctxs.append(proj[4])
next_alpha = proj[2]
proj_h = proj[0]
if options['use_dropout']:
proj_h = dropout_layer(proj[0], use_noise, trng)
else:
proj_h = proj[0]
logit = get_layer('ff')[1](tparams, proj_h, options, prefix='ff_logit_lstm', activ='linear')
if options['prev2out']:
logit += emb
if options['ctx2out']:
logit += get_layer('ff')[1](tparams, ctxs[-1], options, prefix='ff_logit_ctx', activ='linear')
logit = tanh(logit)
if options['use_dropout']:
logit = dropout_layer(logit, use_noise, trng)
if options['n_layers_out'] > 1:
for lidx in xrange(1, options['n_layers_out']):
logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit_h%d'%lidx, activ='rectifier')
if options['use_dropout']:
logit = dropout_layer(logit, use_noise, trng)
logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit', activ='linear')
next_probs = tensor.nnet.softmax(logit)
next_sample = trng.multinomial(pvals=next_probs).argmax(1)
# next word probability
f_next = theano.function([x, ctx0]+init_state+init_memory, [next_probs, next_sample, next_alpha]+
next_state+next_memory, name='f_next', profile=False, allow_input_downcast=True)
return f_init, f_next
def gen_sample_ensemble(f_init, f_next, ctx, cnn_feats, options,
trng=None, k=1, maxlen=30):
# assert the f_init and f_next to be lists
assert len(f_init) == len(f_next)
sample = []
sample_score = []
sample_alpha = []
live_k = 1
dead_k = 0
hyp_samples = [[]] * live_k
hyp_scores = numpy.zeros(live_k, 'float32')
hyp_states = []
hyp_memories = []
hyp_alphas = [[] for _ in range(k)]
# only matters if we use lstm encoder
rval = [f(ctx, cnn_feats) for f in f_init]
ctx0 = [r[0] for r in rval]
next_state = [[r[1].reshape((1, rval[0][1].shape[-1]))] for r in rval]
next_memory = [[r[2].reshape((1, rval[0][2].shape[-1]))] for r in rval]
# reminder: if next_w = -1, the switch statement
# in build_sampler is triggered -> (empty word embeddings)
# next_w = -1 * numpy.ones((1,)).astype('int64')
next_w = -1 * numpy.ones((1,)).astype('int64')
for ii in xrange(maxlen):
# our "next" state/memory in our previous step is now our "initial" state and memory
rval = [f(*([next_w, c]+s+m)) for f,s,m,c in zip(f_next, next_state, next_memory, ctx0)]
next_p = avlist([r[0] for r in rval])
next_alpha = avlist([r[2] for r in rval])
# extract all the states and memories
next_state = [r[3] for r in rval]
next_memory = [r[4] for r in rval]
cand_scores = hyp_scores[:,None] - numpy.log(next_p)
cand_flat = cand_scores.flatten()
ranks_flat = cand_flat.argsort()[:(k-dead_k)] # (k-dead_k) numpy array with min nll
voc_size = next_p.shape[1]
# indexing into the correct selected captions
trans_indices = ranks_flat / voc_size
word_indices = ranks_flat % voc_size
costs = cand_flat[ranks_flat] # extract costs from top hypothesis
# a bunch of lists to hold future hypothesis
new_hyp_samples = []
new_hyp_scores = numpy.zeros(k-dead_k).astype('float32')
new_hyp_alphas = []
new_hyp_states = [[] for _ in range(len(f_init))]
new_hyp_memories = [[] for _ in range(len(f_init))]
# get the corresponding hypothesis and append the predicted word
for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
new_hyp_samples.append(hyp_samples[ti]+[wi])
new_hyp_scores[idx] = copy.copy(costs[idx]) # copy in the cost of that hypothesis
new_hyp_alphas.append(hyp_alphas[ti] + [next_alpha[ti]])
for eidx in range(len(f_init)):
new_hyp_states[eidx].append(copy.copy(next_state[eidx][ti]))
new_hyp_memories[eidx].append(copy.copy(next_memory[eidx][ti]))
# check the finished samples for <eos> character
new_live_k = 0
hyp_samples = []
hyp_scores = []
hyp_states = [[] for _ in range(len(f_init))]
hyp_memories = [[] for _ in range(len(f_init))]
for idx in xrange(len(new_hyp_samples)):
if new_hyp_samples[idx][-1] == 0:
sample.append(new_hyp_samples[idx])
sample_score.append(new_hyp_scores[idx])
sample_alpha.append(new_hyp_alphas[idx])
dead_k += 1 # completed sample!
else:
new_live_k += 1 # collect correct states/memories
hyp_samples.append(new_hyp_samples[idx])
hyp_scores.append(new_hyp_scores[idx])
hyp_alphas[idx] = new_hyp_alphas[idx]
for eidx in range(len(f_init)):
hyp_states[eidx].append(new_hyp_states[eidx][idx])
hyp_memories[eidx].append(new_hyp_memories[eidx][idx])
hyp_scores = numpy.asarray(hyp_scores)
live_k = new_live_k
if new_live_k < 1:
break
if dead_k >= k:
break
next_w = numpy.array([w[-1] for w in hyp_samples])
next_state = [[numpy.array(hyp_states[eidx])] for eidx in range(len(f_init))]
next_memory = [[numpy.array(hyp_memories[eidx])] for eidx in range(len(f_init))]
# dump every remaining one
if live_k > 0:
for idx in xrange(live_k):
sample.append(hyp_samples[idx])
sample_score.append(hyp_scores[idx])
return sample, sample_score, sample_alpha
# generate sample
def gen_sample(f_init, f_next, ctx, cnn_feats, options,
trng=None, k=1, maxlen=30):
"""Generate captions with beam search.
Uses layer definitions and functions defined by build_sampler
This function uses the beam search algorithm to conditionally
generate candidate captions. Supports beam search.
Parameters
----------
f_init : theano function
input: annotation, output: initial lstm state and memory
(also performs transformation on ctx0 if using lstm_encoder)
f_next: theano function
takes the previous word/state/memory + ctx0 and runs one
step through the lstm
ctx0 : numpy array
annotation from convnet, of dimension #annotations x # dimension
[e.g (30 x 300)]
options : dict
dictionary of flags and options
trng : random number generator
k : int
size of beam search
maxlen : int
maximum allowed caption size
Returns
-------
sample : list of list
each sublist contains an (encoded) sample from the model
sample_score : numpy array
scores of each sample
"""
sample = []
sample_score = []
sample_alpha = []
live_k = 1
dead_k = 0
hyp_samples = [[]] * live_k
hyp_scores = numpy.zeros(live_k).astype('float32')
hyp_states = []
hyp_memories = []
hyp_alphas = [[] for _ in range(k)]
# only matters if we use lstm encoder
rval = f_init(ctx, cnn_feats)
ctx0 = rval[0]
next_state = []
next_memory = []
# the states are returned as a: (dim,) and this is just a reshape to (1, dim)
for lidx in xrange(options['n_layers_lstm']):
next_state.append(rval[1+lidx])
next_state[-1] = next_state[-1].reshape([1, next_state[-1].shape[0]])
for lidx in xrange(options['n_layers_lstm']):
next_memory.append(rval[1+options['n_layers_lstm']+lidx])
next_memory[-1] = next_memory[-1].reshape([1, next_memory[-1].shape[0]])
# reminder: if next_w = -1, the switch statement
# in build_sampler is triggered -> (empty word embeddings)
# next_w = -1 * numpy.ones((1,)).astype('int64')
next_w = -1 * numpy.ones((1,)).astype('int64')
for ii in xrange(maxlen):
# our "next" state/memory in our previous step is now our "initial" state and memory
rval = f_next(*([next_w, ctx0]+next_state+next_memory))
next_p = rval[0]
next_w = rval[1]
next_alpha = rval[2]
# extract all the states and memories
next_state = []
next_memory = []
for lidx in xrange(options['n_layers_lstm']):
next_state.append(rval[3+lidx])
next_memory.append(rval[3+options['n_layers_lstm']+lidx])
cand_scores = hyp_scores[:,None] - numpy.log(next_p)
cand_flat = cand_scores.flatten()
ranks_flat = cand_flat.argsort()[:(k-dead_k)] # (k-dead_k) numpy array with min nll
voc_size = next_p.shape[1]
# indexing into the correct selected captions
trans_indices = ranks_flat / voc_size
word_indices = ranks_flat % voc_size
costs = cand_flat[ranks_flat] # extract costs from top hypothesis
# a bunch of lists to hold future hypothesis
new_hyp_samples = []
new_hyp_scores = numpy.zeros(k-dead_k).astype('float32')
new_hyp_states = []
new_hyp_alphas = []
for lidx in xrange(options['n_layers_lstm']):
new_hyp_states.append([])
new_hyp_memories = []
for lidx in xrange(options['n_layers_lstm']):
new_hyp_memories.append([])
# get the corresponding hypothesis and append the predicted word
for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
new_hyp_samples.append(hyp_samples[ti]+[wi])
new_hyp_scores[idx] = copy.copy(costs[idx]) # copy in the cost of that hypothesis
new_hyp_alphas.append(hyp_alphas[ti] + [next_alpha[ti]])
for lidx in xrange(options['n_layers_lstm']):
new_hyp_states[lidx].append(copy.copy(next_state[lidx][ti]))
for lidx in xrange(options['n_layers_lstm']):
new_hyp_memories[lidx].append(copy.copy(next_memory[lidx][ti]))
# check the finished samples for <eos> character
new_live_k = 0
hyp_samples = []
hyp_scores = []
hyp_states = []
for lidx in xrange(options['n_layers_lstm']):
hyp_states.append([])
hyp_memories = []
for lidx in xrange(options['n_layers_lstm']):
hyp_memories.append([])
for idx in xrange(len(new_hyp_samples)):
if new_hyp_samples[idx][-1] == 0:
sample.append(new_hyp_samples[idx])
sample_score.append(new_hyp_scores[idx])
sample_alpha.append(new_hyp_alphas[idx])
dead_k += 1 # completed sample!
else:
new_live_k += 1 # collect correct states/memories
hyp_samples.append(new_hyp_samples[idx])
hyp_scores.append(new_hyp_scores[idx])
hyp_alphas[idx] = new_hyp_alphas[idx]
for lidx in xrange(options['n_layers_lstm']):
hyp_states[lidx].append(new_hyp_states[lidx][idx])
for lidx in xrange(options['n_layers_lstm']):
hyp_memories[lidx].append(new_hyp_memories[lidx][idx])
hyp_scores = numpy.array(hyp_scores)
live_k = new_live_k
if new_live_k < 1:
break
if dead_k >= k:
break
next_w = numpy.array([w[-1] for w in hyp_samples])
next_state = []
for lidx in xrange(options['n_layers_lstm']):
next_state.append(numpy.array(hyp_states[lidx]))
next_memory = []
for lidx in xrange(options['n_layers_lstm']):
next_memory.append(numpy.array(hyp_memories[lidx]))
# dump every remaining one
if live_k > 0:
for idx in xrange(live_k):
sample.append(hyp_samples[idx])
sample_score.append(hyp_scores[idx])
return sample, sample_score, sample_alpha
def validate_options(options):
# Put friendly reminders here
if options['dim_word'] > options['dim']:
warnings.warn('dim_word should only be as large as dim.')
if options['lstm_encoder']:
warnings.warn('Note that this is a 1-D bidirectional LSTM, not 2-D one.')
if options['use_dropout_lstm']:
warnings.warn('dropout in the lstm seems not to help')
return options
| bsd-3-clause |
glennq/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 80 | 4768 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_splits=3):
cv = KFold(n_splits=n_splits)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv.split(X_train, y_train):
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_splits
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
giorgiop/scikit-learn | examples/cluster/plot_affinity_propagation.py | 346 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
astocko/statsmodels | examples/incomplete/wls_extended.py | 32 | 16137 | """
Weighted Least Squares
This example is extended to look at the meaning of rsquared in WLS,
at outliers, compares with RLM and a short bootstrap
"""
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
data = sm.datasets.ccard.load()
data.exog = sm.add_constant(data.exog, prepend=False)
ols_fit = sm.OLS(data.endog, data.exog).fit()
# perhaps the residuals from this fit depend on the square of income
incomesq = data.exog[:,2]
plt.scatter(incomesq, ols_fit.resid)
#@savefig wls_resid_check.png
plt.grid()
# If we think that the variance is proportional to income**2
# we would want to weight the regression by income
# the weights argument in WLS weights the regression by its square root
# and since income enters the equation, if we have income/income
# it becomes the constant, so we would want to perform
# this type of regression without an explicit constant in the design
#..data.exog = data.exog[:,:-1]
wls_fit = sm.WLS(data.endog, data.exog[:,:-1], weights=1/incomesq).fit()
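# Side note (an illustrative check added here, not in the original script):
# WLS with weights w is numerically the same as OLS on the data premultiplied
# by sqrt(w), which is what "weights the regression by its square root" means.
w = 1.0 / incomesq
wls_check = sm.WLS(data.endog, data.exog[:, :-1], weights=w).fit()
ols_check = sm.OLS(np.sqrt(w) * data.endog,
                   np.sqrt(w)[:, None] * data.exog[:, :-1]).fit()
# both parameter vectors should agree to numerical precision
print('max abs difference WLS vs scaled-OLS params:',
      np.max(np.abs(wls_check.params - ols_check.params)))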
# This however, leads to difficulties in interpreting the post-estimation
# statistics. Statsmodels does not yet handle this elegantly, but
# the following may be more appropriate
# explained sum of squares
ess = wls_fit.uncentered_tss - wls_fit.ssr
# rsquared
rsquared = ess/wls_fit.uncentered_tss
# mean squared error of the model
mse_model = ess/(wls_fit.df_model + 1) # add back the dof of the constant
# f statistic
fvalue = mse_model/wls_fit.mse_resid
# adjusted r-squared
rsquared_adj = 1 -(wls_fit.nobs)/(wls_fit.df_resid)*(1-rsquared)
#Trying to figure out what's going on in this example
#----------------------------------------------------
#JP: I need to look at this again. Even if I exclude the weight variable
# from the regressors and keep the constant in then the reported rsquared
# stays small. Below also compared using squared or sqrt of weight variable.
# TODO: need to add 45 degree line to graphs
wls_fit3 = sm.WLS(data.endog, data.exog[:,(0,1,3,4)], weights=1/incomesq).fit()
print(wls_fit3.summary())
print('corrected rsquared')
print((wls_fit3.uncentered_tss - wls_fit3.ssr)/wls_fit3.uncentered_tss)
plt.figure();
plt.title('WLS dropping heteroscedasticity variable from regressors');
plt.plot(data.endog, wls_fit3.fittedvalues, 'o');
plt.xlim([0,2000]);
#@savefig wls_drop_het.png
plt.ylim([0,2000]);
print('raw correlation of endog and fittedvalues')
print(np.corrcoef(data.endog, wls_fit.fittedvalues))
print('raw correlation coefficient of endog and fittedvalues squared')
print(np.corrcoef(data.endog, wls_fit.fittedvalues)[0,1]**2)
# compare with robust regression,
# heteroscedasticity correction downweights the outliers
rlm_fit = sm.RLM(data.endog, data.exog).fit()
plt.figure();
plt.title('using robust for comparison');
plt.plot(data.endog, rlm_fit.fittedvalues, 'o');
plt.xlim([0,2000]);
#@savefig wls_robust_compare.png
plt.ylim([0,2000]);
#What is going on? A more systematic look at the data
#----------------------------------------------------
# two helper functions
def getrsq(fitresult):
'''calculates rsquared residual, total and explained sums of squares
Parameters
----------
fitresult : instance of Regression Result class, or tuple of (resid, endog) arrays
regression residuals and endogenous variable
Returns
-------
rsquared
residual sum of squares
(centered) total sum of squares
explained sum of squares (for centered)
'''
if hasattr(fitresult, 'resid') and hasattr(fitresult, 'model'):
resid = fitresult.resid
endog = fitresult.model.endog
nobs = fitresult.nobs
else:
resid = fitresult[0]
endog = fitresult[1]
nobs = resid.shape[0]
rss = np.dot(resid, resid)
tss = np.var(endog)*nobs
return 1-rss/tss, rss, tss, tss-rss
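# Illustrative usage (a sketch, not part of the original script): both call
# forms accepted above should agree for a plain OLS fit, since its
# wresid/wendog equal resid/endog:
#   r2_a = getrsq(ols_fit)[0]
#   r2_b = getrsq((ols_fit.resid, data.endog))[0]   # same number, up to float noise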
def index_trim_outlier(resid, k):
'''returns indices to residual array with k outliers removed
Parameters
----------
resid : array_like, 1d
data vector, usually residuals of a regression
k : int
number of outliers to remove
Returns
-------
trimmed_index : array, 1d
index array with k outliers removed
outlier_index : array, 1d
index array of k outliers
Notes
-----
Outliers are defined as the k observations with the largest
absolute values.
'''
sort_index = np.argsort(np.abs(resid))
# index of non-outlier
trimmed_index = np.sort(sort_index[:-k])
outlier_index = np.sort(sort_index[-k:])
return trimmed_index, outlier_index
#Comparing estimation results for ols, rlm and wls with and without outliers
#---------------------------------------------------------------------------
#ols_test_fit = sm.OLS(data.endog, data.exog).fit()
olskeep, olsoutl = index_trim_outlier(ols_fit.resid, 2)
print('ols outliers', olsoutl, ols_fit.resid[olsoutl])
ols_fit_rm2 = sm.OLS(data.endog[olskeep], data.exog[olskeep,:]).fit()
rlm_fit_rm2 = sm.RLM(data.endog[olskeep], data.exog[olskeep,:]).fit()
#weights = 1/incomesq
results = [ols_fit, ols_fit_rm2, rlm_fit, rlm_fit_rm2]
#Note: I think incomesq is already square
for weights in [1/incomesq, 1/incomesq**2, np.sqrt(incomesq)]:
print('\nComparison OLS and WLS with and without outliers')
wls_fit0 = sm.WLS(data.endog, data.exog, weights=weights).fit()
wls_fit_rm2 = sm.WLS(data.endog[olskeep], data.exog[olskeep,:],
weights=weights[olskeep]).fit()
wlskeep, wlsoutl = index_trim_outlier(ols_fit.resid, 2)
print('2 outliers candidates and residuals')
print(wlsoutl, wls_fit.resid[olsoutl])
# redundant because ols and wls outliers are the same:
##wls_fit_rm2_ = sm.WLS(data.endog[wlskeep], data.exog[wlskeep,:],
## weights=1/incomesq[wlskeep]).fit()
print('outliers ols, wls:', olsoutl, wlsoutl)
print('rsquared')
print('ols vs ols rm2', ols_fit.rsquared, ols_fit_rm2.rsquared)
print('wls vs wls rm2', wls_fit0.rsquared, wls_fit_rm2.rsquared) #, wls_fit_rm2_.rsquared
print('compare R2_resid versus R2_wresid')
print('ols minus 2', getrsq(ols_fit_rm2)[0],)
print(getrsq((ols_fit_rm2.wresid, ols_fit_rm2.model.wendog))[0])
print('wls ', getrsq(wls_fit)[0],)
print(getrsq((wls_fit.wresid, wls_fit.model.wendog))[0])
print('wls minus 2', getrsq(wls_fit_rm2)[0])
# next is same as wls_fit_rm2.rsquared for cross checking
print(getrsq((wls_fit_rm2.wresid, wls_fit_rm2.model.wendog))[0])
#print(getrsq(wls_fit_rm2_)[0],
#print(getrsq((wls_fit_rm2_.wresid, wls_fit_rm2_.model.wendog))[0]
results.extend([wls_fit0, wls_fit_rm2])
print(' ols ols_rm2 rlm rlm_rm2 wls (lin) wls_rm2 (lin) wls (squ) wls_rm2 (squ) wls (sqrt) wls_rm2 (sqrt)')
print('Parameter estimates')
print(np.column_stack([r.params for r in results]))
print('R2 original data, next line R2 weighted data')
print(np.column_stack([getattr(r, 'rsquared', None) for r in results]))
print('Standard errors')
print(np.column_stack([getattr(r, 'bse', None) for r in results]))
print('Heteroscedasticity robust standard errors (with ols)')
print('with outliers')
print(np.column_stack([getattr(ols_fit, se, None) for se in ['HC0_se', 'HC1_se', 'HC2_se', 'HC3_se']]))
#..'''
#..
#.. ols ols_rm2 rlm rlm_rm2 wls (lin) wls_rm2 (lin) wls (squ) wls_rm2 (squ) wls (sqrt) wls_rm2 (sqrt)
#..Parameter estimates
#..[[ -3.08181404 -5.06103843 -4.98510966 -5.34410309 -2.69418516 -3.1305703 -1.43815462 -1.58893054 -3.57074829 -6.80053364]
#.. [ 234.34702702 115.08753715 129.85391456 109.01433492 158.42697752 128.38182357 60.95113284 100.25000841 254.82166855 103.75834726]
#.. [ -14.99684418 -5.77558429 -6.46204829 -4.77409191 -7.24928987 -7.41228893 6.84943071 -3.34972494 -16.40524256 -4.5924465 ]
#.. [ 27.94090839 85.46566835 89.91389709 95.85086459 60.44877369 79.7759146 55.9884469 60.97199734 -3.8085159 84.69170048]
#.. [-237.1465136 39.51639838 -15.50014814 31.39771833 -114.10886935 -40.04207242 -6.41976501 -38.83583228 -260.72084271 117.20540179]]
#..
#..R2 original data, next line R2 weighted data
#..[[ 0.24357792 0.31745994 0.19220308 0.30527648 0.22861236 0.3112333 0.06573949 0.29366904 0.24114325 0.31218669]]
#..[[ 0.24357791 0.31745994 None None 0.05936888 0.0679071 0.06661848 0.12769654 0.35326686 0.54681225]]
#..
#..-> R2 with weighted data is jumping all over
#..
#..standard errors
#..[[ 5.51471653 3.31028758 2.61580069 2.39537089 3.80730631 2.90027255 2.71141739 2.46959477 6.37593755 3.39477842]
#.. [ 80.36595035 49.35949263 38.12005692 35.71722666 76.39115431 58.35231328 87.18452039 80.30086861 86.99568216 47.58202096]
#.. [ 7.46933695 4.55366113 3.54293763 3.29509357 9.72433732 7.41259156 15.15205888 14.10674821 7.18302629 3.91640711]
#.. [ 82.92232357 50.54681754 39.33262384 36.57639175 58.55088753 44.82218676 43.11017757 39.31097542 96.4077482 52.57314209]
#.. [ 199.35166485 122.1287718 94.55866295 88.3741058 139.68749646 106.89445525 115.79258539 105.99258363 239.38105863 130.32619908]]
#..
#..robust standard errors (with ols)
#..with outliers
#.. HC0_se HC1_se HC2_se HC3_se'
#..[[ 3.30166123 3.42264107 3.4477148 3.60462409]
#.. [ 88.86635165 92.12260235 92.08368378 95.48159869]
#.. [ 6.94456348 7.19902694 7.19953754 7.47634779]
#.. [ 92.18777672 95.56573144 95.67211143 99.31427277]
#.. [ 212.9905298 220.79495237 221.08892661 229.57434782]]
#..
#..removing 2 outliers
#..[[ 2.57840843 2.67574088 2.68958007 2.80968452]
#.. [ 36.21720995 37.58437497 37.69555106 39.51362437]
#.. [ 3.1156149 3.23322638 3.27353882 3.49104794]
#.. [ 50.09789409 51.98904166 51.89530067 53.79478834]
#.. [ 94.27094886 97.82958699 98.25588281 102.60375381]]
#..
#..
#..'''
# a quick bootstrap analysis
# --------------------------
#
#(I didn't check whether this is fully correct statistically)
#**With OLS on full sample**
nobs, nvar = data.exog.shape
niter = 2000
bootres = np.zeros((niter, nvar*2))
for it in range(niter):
rind = np.random.randint(nobs, size=nobs)
endog = data.endog[rind]
exog = data.exog[rind,:]
res = sm.OLS(endog, exog).fit()
bootres[it, :nvar] = res.params
bootres[it, nvar:] = res.bse
np.set_printoptions(linewidth=200)
print('Bootstrap Results of parameters and parameter standard deviation OLS')
print('Parameter estimates')
print('median', np.median(bootres[:,:5], 0))
print('mean ', np.mean(bootres[:,:5], 0))
print('std ', np.std(bootres[:,:5], 0))
print('Standard deviation of parameter estimates')
print('median', np.median(bootres[:,5:], 0))
print('mean ', np.mean(bootres[:,5:], 0))
print('std ', np.std(bootres[:,5:], 0))
plt.figure()
for i in range(4):
plt.subplot(2,2,i+1)
plt.hist(bootres[:,i],50)
plt.title('var%d'%i)
#@savefig wls_bootstrap.png
plt.figtext(0.5, 0.935, 'OLS Bootstrap',
ha='center', color='black', weight='bold', size='large')
#**With WLS on sample with outliers removed**
data_endog = data.endog[olskeep]
data_exog = data.exog[olskeep,:]
incomesq_rm2 = incomesq[olskeep]
nobs, nvar = data_exog.shape
niter = 500 # a bit slow
bootreswls = np.zeros((niter, nvar*2))
for it in range(niter):
rind = np.random.randint(nobs, size=nobs)
endog = data_endog[rind]
exog = data_exog[rind,:]
    res = sm.WLS(endog, exog, weights=1/incomesq_rm2[rind]).fit()
bootreswls[it, :nvar] = res.params
bootreswls[it, nvar:] = res.bse
print('Bootstrap Results of parameters and parameter standard deviation',)
print('WLS removed 2 outliers from sample')
print('Parameter estimates')
print('median', np.median(bootreswls[:,:5], 0))
print('mean ', np.mean(bootreswls[:,:5], 0))
print('std ', np.std(bootreswls[:,:5], 0))
print('Standard deviation of parameter estimates')
print('median', np.median(bootreswls[:,5:], 0))
print('mean ', np.mean(bootreswls[:,5:], 0))
print('std ', np.std(bootreswls[:,5:], 0))
plt.figure()
for i in range(4):
plt.subplot(2,2,i+1)
plt.hist(bootreswls[:,i],50)
plt.title('var%d'%i)
#@savefig wls_bootstrap_rm2.png
plt.figtext(0.5, 0.935, 'WLS rm2 Bootstrap',
ha='center', color='black', weight='bold', size='large')
#..plt.show()
#..plt.close('all')
#::
#
# The following are random draws, not fixed by a seed
#
# Bootstrap Results of parameters and parameter standard deviation
# OLS
#
# Parameter estimates
# median [ -3.26216383 228.52546429 -14.57239967 34.27155426 -227.02816597]
# mean [ -2.89855173 234.37139359 -14.98726881 27.96375666 -243.18361746]
# std [ 3.78704907 97.35797802 9.16316538 94.65031973 221.79444244]
#
# Standard deviation of parameter estimates
# median [ 5.44701033 81.96921398 7.58642431 80.64906783 200.19167735]
# mean [ 5.44840542 86.02554883 8.56750041 80.41864084 201.81196849]
# std [ 1.43425083 29.74806562 4.22063268 19.14973277 55.34848348]
#
# Bootstrap Results of parameters and parameter standard deviation
# WLS removed 2 outliers from sample
#
# Parameter estimates
# median [ -3.95876112 137.10419042 -9.29131131 88.40265447 -44.21091869]
# mean [ -3.67485724 135.42681207 -8.7499235 89.74703443 -46.38622848]
# std [ 2.96908679 56.36648967 7.03870751 48.51201918 106.92466097]
#
# Standard deviation of parameter estimates
# median [ 2.89349748 59.19454402 6.70583332 45.40987953 119.05241283]
# mean [ 2.97600894 60.14540249 6.92102065 45.66077486 121.35519673]
# std [ 0.55378808 11.77831934 1.69289179 7.4911526 23.72821085]
#
#
#
#Conclusion: problem with outliers and possibly heteroscedasticity
#-----------------------------------------------------------------
#
#in bootstrap results
#
#* bse in OLS underestimates the standard deviation of the parameters
# compared to standard deviation in bootstrap
#* OLS heteroscedasticity corrected standard errors for the original
# data (above) are close to bootstrap std
#* using WLS with 2 outliers removed has a relatively good match between
# the mean or median bse and the std of the parameter estimates in the
# bootstrap
#
#We could also include rsquared in bootstrap, and do it also for RLM.
#The problems could also mean that the linearity assumption is violated,
#e.g. try non-linear transformation of exog variables, but linear
#in parameters.
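#
# A hedged numeric cross-check of the first two points above, reusing only
# objects already defined in this script (OLS on the full sample): column 1
# is the OLS bse, column 2 the bootstrap standard deviation of the parameter
# estimates, column 3 the HC1 heteroscedasticity robust standard errors.
print('OLS bse vs bootstrap std of params vs HC1 robust se')
print(np.column_stack([ols_fit.bse, bootres[:, :nvar].std(0), ols_fit.HC1_se]))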
#
#
#for statsmodels
#
# * In this case rsquared for original data looks less random/arbitrary.
# * Don't change definition of rsquared from centered tss to uncentered
# tss when calculating rsquared in WLS if the original exog contains
# a constant. The increase in rsquared because of a change in definition
# will be very misleading.
# * Whether there is a constant in the transformed exog, wexog, or not,
# might affect also the degrees of freedom calculation, but I haven't
# checked this. I would guess that the df_model should stay the same,
# but needs to be verified with a textbook.
# * df_model has to be adjusted if the original data does not have a
# constant, e.g. when regressing an endog on a single exog variable
# without constant. This case might require also a redefinition of
#   the rsquared and F statistic for the regression anova to use the
# uncentered tss.
# This can be done through keyword parameter to model.__init__ or
#   through autodetection with hasconst = (exog.var(0)<1e-10).any()
# I'm not sure about fixed effects with a full dummy set but
#   without a constant. In this case autodetection wouldn't work this
# way. Also, I'm not sure whether a ddof keyword parameter can also
# handle the hasconst case.
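#
# A hedged sketch of the centered vs. uncentered rsquared choice discussed in
# the notes above. It is illustrative only: resid, endog and exog stand for
# the residuals, response and design matrix of some fitted model; they are
# assumptions here, not objects defined in this script.
def rsquared_centered_or_uncentered(resid, endog, exog):
    hasconst = (exog.var(0) < 1e-10).any()   # autodetection heuristic from the note
    ssr = np.dot(resid, resid)
    if hasconst:
        tss = ((endog - endog.mean())**2).sum()   # centered total sum of squares
    else:
        tss = (endog**2).sum()                    # uncentered total sum of squares
    return 1. - ssr / tss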
| bsd-3-clause |
glennq/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 137 | 9461 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.model_selection import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
        labels = model._project_and_cluster(mat, vectors,
                                            n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
ray-project/ray | python/ray/tune/examples/mnist_ptl_mini.py | 1 | 4427 | import math
import torch
from filelock import FileLock
from torch.nn import functional as F
from torchmetrics import Accuracy
import pytorch_lightning as pl
from pl_bolts.datamodules.mnist_datamodule import MNISTDataModule
import os
from ray.tune.integration.pytorch_lightning import TuneReportCallback
from ray import air, tune
class LightningMNISTClassifier(pl.LightningModule):
def __init__(self, config, data_dir=None):
super(LightningMNISTClassifier, self).__init__()
self.data_dir = data_dir or os.getcwd()
self.lr = config["lr"]
layer_1, layer_2 = config["layer_1"], config["layer_2"]
self.batch_size = config["batch_size"]
# mnist images are (1, 28, 28) (channels, width, height)
self.layer_1 = torch.nn.Linear(28 * 28, layer_1)
self.layer_2 = torch.nn.Linear(layer_1, layer_2)
self.layer_3 = torch.nn.Linear(layer_2, 10)
self.accuracy = Accuracy()
def forward(self, x):
batch_size, channels, width, height = x.size()
x = x.view(batch_size, -1)
x = self.layer_1(x)
x = torch.relu(x)
x = self.layer_2(x)
x = torch.relu(x)
x = self.layer_3(x)
x = torch.log_softmax(x, dim=1)
return x
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=self.lr)
def training_step(self, train_batch, batch_idx):
x, y = train_batch
logits = self.forward(x)
loss = F.nll_loss(logits, y)
acc = self.accuracy(logits, y)
self.log("ptl/train_loss", loss)
self.log("ptl/train_accuracy", acc)
return loss
def validation_step(self, val_batch, batch_idx):
x, y = val_batch
logits = self.forward(x)
loss = F.nll_loss(logits, y)
acc = self.accuracy(logits, y)
return {"val_loss": loss, "val_accuracy": acc}
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
avg_acc = torch.stack([x["val_accuracy"] for x in outputs]).mean()
self.log("ptl/val_loss", avg_loss)
self.log("ptl/val_accuracy", avg_acc)
def train_mnist_tune(config, num_epochs=10, num_gpus=0):
data_dir = os.path.abspath("./data")
model = LightningMNISTClassifier(config, data_dir)
with FileLock(os.path.expanduser("~/.data.lock")):
dm = MNISTDataModule(
data_dir=data_dir, num_workers=1, batch_size=config["batch_size"]
)
metrics = {"loss": "ptl/val_loss", "acc": "ptl/val_accuracy"}
trainer = pl.Trainer(
max_epochs=num_epochs,
# If fractional GPUs passed in, convert to int.
gpus=math.ceil(num_gpus),
enable_progress_bar=False,
callbacks=[TuneReportCallback(metrics, on="validation_end")],
)
trainer.fit(model, dm)
def tune_mnist(num_samples=10, num_epochs=10, gpus_per_trial=0):
config = {
"layer_1": tune.choice([32, 64, 128]),
"layer_2": tune.choice([64, 128, 256]),
"lr": tune.loguniform(1e-4, 1e-1),
"batch_size": tune.choice([32, 64, 128]),
}
trainable = tune.with_parameters(
train_mnist_tune, num_epochs=num_epochs, num_gpus=gpus_per_trial
)
tuner = tune.Tuner(
tune.with_resources(trainable, resources={"cpu": 1, "gpu": gpus_per_trial}),
tune_config=tune.TuneConfig(
metric="loss",
mode="min",
num_samples=num_samples,
),
run_config=air.RunConfig(
name="tune_mnist",
),
param_space=config,
)
results = tuner.fit()
print("Best hyperparameters found were: ", results.get_best_result().config)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing"
)
parser.add_argument(
"--server-address",
type=str,
default=None,
required=False,
help="The address of server to connect to if using Ray Client.",
)
args, _ = parser.parse_known_args()
if args.smoke_test:
tune_mnist(num_samples=1, num_epochs=1, gpus_per_trial=0)
else:
if args.server_address:
import ray
ray.init(f"ray://{args.server_address}")
tune_mnist(num_samples=10, num_epochs=10, gpus_per_trial=0)
| apache-2.0 |
SerialShadow/SickRage | lib/guessit/plugins/transformers.py | 33 | 9580 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from logging import getLogger
from pkg_resources import EntryPoint
from guessit.options import reload as reload_options
from stevedore import ExtensionManager
from stevedore.extension import Extension
log = getLogger(__name__)
class Transformer(object): # pragma: no cover
def __init__(self, priority=0):
self.priority = priority
self.log = getLogger(self.name)
@property
def name(self):
return self.__class__.__name__
def supported_properties(self):
return {}
def second_pass_options(self, mtree, options=None):
return None
def should_process(self, mtree, options=None):
return True
def process(self, mtree, options=None):
pass
def post_process(self, mtree, options=None):
pass
def register_arguments(self, opts, naming_opts, output_opts, information_opts, webservice_opts, other_options):
pass
def rate_quality(self, guess, *props):
return 0
class CustomTransformerExtensionManager(ExtensionManager):
def __init__(self, namespace='guessit.transformer', invoke_on_load=True,
invoke_args=(), invoke_kwds={}, propagate_map_exceptions=True, on_load_failure_callback=None,
verify_requirements=False):
super(CustomTransformerExtensionManager, self).__init__(namespace=namespace,
invoke_on_load=invoke_on_load,
invoke_args=invoke_args,
invoke_kwds=invoke_kwds,
propagate_map_exceptions=propagate_map_exceptions,
on_load_failure_callback=on_load_failure_callback,
verify_requirements=verify_requirements)
@staticmethod
def order_extensions(extensions):
"""Order the loaded transformers
It should follow those rules
- website before language (eg: tvu.org.ru vs russian)
- language before episodes_rexps
- properties before language (eg: he-aac vs hebrew)
- release_group before properties (eg: XviD-?? vs xvid)
"""
extensions.sort(key=lambda ext: -ext.obj.priority)
return extensions
@staticmethod
def _load_one_plugin(ep, invoke_on_load, invoke_args, invoke_kwds, verify_requirements=True):
if not ep.dist:
# `require` argument of ep.load() is deprecated in newer versions of setuptools
if hasattr(ep, 'resolve'):
plugin = ep.resolve()
elif hasattr(ep, '_load'):
plugin = ep._load()
else:
plugin = ep.load(require=False)
else:
plugin = ep.load()
if invoke_on_load:
obj = plugin(*invoke_args, **invoke_kwds)
else:
obj = None
return Extension(ep.name, ep, plugin, obj)
def _load_plugins(self, invoke_on_load, invoke_args, invoke_kwds, verify_requirements):
return self.order_extensions(super(CustomTransformerExtensionManager, self)._load_plugins(invoke_on_load, invoke_args, invoke_kwds, verify_requirements))
def objects(self):
return self.map(self._get_obj)
@staticmethod
def _get_obj(ext):
return ext.obj
def object(self, name):
try:
return self[name].obj
except KeyError:
return None
def register_module(self, name=None, module_name=None, attrs=(), entry_point=None):
if entry_point:
ep = EntryPoint.parse(entry_point)
else:
ep = EntryPoint(name, module_name, attrs)
loaded = self._load_one_plugin(ep, invoke_on_load=True, invoke_args=(), invoke_kwds={})
if loaded:
self.extensions.append(loaded)
self.extensions = self.order_extensions(self.extensions)
self._extensions_by_name = None
class DefaultTransformerExtensionManager(CustomTransformerExtensionManager):
@property
def _internal_entry_points(self):
return ['split_path_components = guessit.transfo.split_path_components:SplitPathComponents',
'guess_filetype = guessit.transfo.guess_filetype:GuessFiletype',
'split_explicit_groups = guessit.transfo.split_explicit_groups:SplitExplicitGroups',
'guess_date = guessit.transfo.guess_date:GuessDate',
'guess_website = guessit.transfo.guess_website:GuessWebsite',
'guess_release_group = guessit.transfo.guess_release_group:GuessReleaseGroup',
'guess_properties = guessit.transfo.guess_properties:GuessProperties',
'guess_language = guessit.transfo.guess_language:GuessLanguage',
'guess_video_rexps = guessit.transfo.guess_video_rexps:GuessVideoRexps',
'guess_episodes_rexps = guessit.transfo.guess_episodes_rexps:GuessEpisodesRexps',
'guess_weak_episodes_rexps = guessit.transfo.guess_weak_episodes_rexps:GuessWeakEpisodesRexps',
'guess_bonus_features = guessit.transfo.guess_bonus_features:GuessBonusFeatures',
'guess_year = guessit.transfo.guess_year:GuessYear',
'guess_country = guessit.transfo.guess_country:GuessCountry',
'guess_idnumber = guessit.transfo.guess_idnumber:GuessIdnumber',
'split_on_dash = guessit.transfo.split_on_dash:SplitOnDash',
'guess_episode_info_from_position = guessit.transfo.guess_episode_info_from_position:GuessEpisodeInfoFromPosition',
'guess_movie_title_from_position = guessit.transfo.guess_movie_title_from_position:GuessMovieTitleFromPosition',
'guess_episode_details = guessit.transfo.guess_episode_details:GuessEpisodeDetails',
'expected_series = guessit.transfo.expected_series:ExpectedSeries',
'expected_title = guessit.transfo.expected_title:ExpectedTitle',]
def _find_entry_points(self, namespace):
entry_points = {}
# Internal entry points
if namespace == self.namespace:
for internal_entry_point_str in self._internal_entry_points:
internal_entry_point = EntryPoint.parse(internal_entry_point_str)
entry_points[internal_entry_point.name] = internal_entry_point
# Package entry points
setuptools_entrypoints = super(DefaultTransformerExtensionManager, self)._find_entry_points(namespace)
for setuptools_entrypoint in setuptools_entrypoints:
entry_points[setuptools_entrypoint.name] = setuptools_entrypoint
return list(entry_points.values())
_extensions = None
def all_transformers():
return _extensions.objects()
def get_transformer(name):
return _extensions.object(name)
def add_transformer(name, module_name, class_name):
"""
Add a transformer
:param name: the name of the transformer. ie: 'guess_regexp_id'
    :param module_name: the module name. ie: 'flexget.utils.parsers.transformers.guess_regexp_id'
:param class_name: the class name. ie: 'GuessRegexpId'
"""
_extensions.register_module(name, module_name, (class_name,))
def add_transformer(entry_point):
"""
Add a transformer
:param entry_point: entry point spec format. ie: 'guess_regexp_id = flexget.utils.parsers.transformers.guess_regexp_id:GuessRegexpId'
"""
_extensions.register_module(entry_point = entry_point)
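# Illustrative usage of the entry point form above (a hedged example; the
# module in the spec string is the one quoted in the docstring, not a package
# shipped with guessit):
#   add_transformer('guess_regexp_id = flexget.utils.parsers.transformers.guess_regexp_id:GuessRegexpId')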
def reload(custom=False):
"""
Reload extension manager with default or custom one.
:param custom: if True, custom manager will be used, else default one.
Default manager will load default extensions from guessit and setuptools packaging extensions
Custom manager will not load default extensions from guessit, using only setuptools packaging extensions.
:type custom: boolean
"""
global _extensions
if custom:
_extensions = CustomTransformerExtensionManager()
else:
_extensions = DefaultTransformerExtensionManager()
reload_options(all_transformers())
reload()
| gpl-3.0 |
pligor/predicting-future-product-prices | 03_good_deal/relevant_deals.py | 1 | 3248 | # -*- coding: UTF-8 -*-
from __future__ import division
import numpy as np
import pandas as pd
import sys
import math
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
import re
import os
import csv
from sklearn.neighbors import NearestNeighbors
def printgr(obj):
print repr(obj).decode('unicode-escape')
class RelevantDeals(object):
def __init__(self, df_all=None, deal_df=None):
super(RelevantDeals, self).__init__()
if df_all is None:
df_all = pd.concat((pd.read_csv('../mobiles_03_train.csv', index_col=0, encoding='utf-8',
quoting=csv.QUOTE_ALL),
pd.read_csv('../mobiles_03_test.csv', index_col=0, encoding='utf-8',
quoting=csv.QUOTE_ALL)), axis=0)
if deal_df is None:
deal_df = pd.read_csv('../mobiles_04_deals_display.csv', index_col=0, encoding='utf-8',
quoting=csv.QUOTE_ALL)
assert set(df_all.index) == set(deal_df.index), "they must have the same index"
self.df_all = df_all
self.deal_df = deal_df
self.nn = NearestNeighbors(n_neighbors=len(df_all), # for simple nearest neighbors,not play a role
p=2, # minkowski distance
# radius=1., leaf_size=30, #for simple nearest neighbors it does not play a role
)
self.nn.fit(df_all)
def getSome(self, target_ind):
"""target_ind is pandas index"""
curX = self.df_all.loc[target_ind].values[np.newaxis]
neighbors_sorted = self.nn.kneighbors(X=curX, # we drop the first because it is itself
n_neighbors=len(self.df_all), return_distance=False).flatten()[1:]
neighbors_sorted_inds = [self.df_all.index[neighbor] for neighbor in neighbors_sorted]
cur_deal_metric = self.deal_df.loc[target_ind]['deal_metric']
better_deals = self.deal_df[self.deal_df['deal_metric'] > cur_deal_metric]
better_deal_inds = better_deals.index
# filtering out only better deals without losing neighbor order
relevant_better_deals = np.array([neighbor_ind for neighbor_ind in neighbors_sorted_inds
if neighbor_ind in better_deal_inds])
return relevant_better_deals
if __name__ == "__main__":
df_deal = pd.read_csv('../mobiles_04_deals_display.csv', index_col=0, encoding='utf-8',
quoting=csv.QUOTE_ALL)
df = pd.concat((pd.read_csv('../mobiles_03_train.csv', index_col=0, encoding='utf-8',
quoting=csv.QUOTE_ALL),
pd.read_csv('../mobiles_03_test.csv', index_col=0, encoding='utf-8',
quoting=csv.QUOTE_ALL)), axis=0)
rd = RelevantDeals(deal_df=df_deal, df_all=df)
#targetIndex = df.index[347] # Apple iphone 6 (16GB)
leeco = df_deal[["LeEco Le Max 2 (128GB)" in cur_name for cur_name in df_deal['display_name']]]
targetIndex = leeco.index[0]
print df_deal.loc[targetIndex]
print rd.getSome(target_ind=targetIndex)[:10]
| agpl-3.0 |
aflaxman/scikit-learn | benchmarks/bench_rcv1_logreg_convergence.py | 56 | 7229 | # Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import gc
import time
from sklearn.externals.joblib import Memory
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model.sag import get_auto_step_size
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
def get_loss(w, intercept, myX, myy, C):
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except:
C = clf.C
try:
intercept = clf.intercept_
except:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
def get_max_squared_sum(X):
"""Get the maximum row-wise sum of squares"""
return np.sum(X ** 2, axis=1).max()
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
# compute the same step_size than in LR-sag
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
| bsd-3-clause |
fanannan/ClusteredClassifier | clustered_classifier.py | 1 | 6353 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import copy
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
# This is an experimental classifier
# It assumes using a binary classifier which has predict_proba()
# Please cite me if you use this code and/or idea.
class ClusteredClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, base_clusterizer, base_classifier, num_iter=10, verbose=False, *args, **keywords):
self.base_clusterizer = base_clusterizer
self.base_classifier = base_classifier
self.minimum_samples_per_cluster = 3
self.minimum_train_accuracy = 0.5
self.num_iter = num_iter
self.verbose = verbose
self.labels = list()
self.models = list()
def fit(self, x_train, y_train):
self.labels = sorted(list(set(y_train)))
models = list()
for itr in xrange(self.num_iter):
models.append(self.fit_sub(x_train, y_train, itr))
# todo: pick up top n models
self.models = models
return models
def fit_sub(self, x_train, y_train, itr):
if self.verbose:
            print('running clustering #{0}'.format(itr+1), file=sys.stderr)
clusterizer = copy.deepcopy(self.base_clusterizer)
clusterizer.fit(x_train)
cluster_numbers = clusterizer.predict(x_train)
num_clusters = len(set(cluster_numbers))
classifiers = list()
for num in xrange(num_clusters):
if self.verbose:
print('running classification #{0}-{1}'.format(itr+1, num), file=sys.stderr)
x_train_sub, y_train_sub = extract(x_train, y_train, cluster_numbers, num)
if len(x_train_sub) > self.minimum_samples_per_cluster:
classifier = copy.deepcopy(self.base_classifier)
classifier.fit(x_train_sub, y_train_sub)
classes = classifier.classes_
accuracy = classifier.score(x_train_sub, y_train_sub)
else:
classifier = None
classes = []
accuracy = np.NaN
classifiers.append((classifier, classes, accuracy,))
if self.verbose:
                print('training class labels #{0}-{1}: {2}'.format(itr+1, num, classes), file=sys.stderr)
print('training accuracy #{0}-{1}: {2}'.format(itr+1, num, accuracy), file=sys.stderr)
return clusterizer, classifiers
def predict_probas(self, x_test):
probas_list = list()
for features in x_test:
x = np.array([features])
probas = list()
for model in self.models: # slow!
clusterizer, classifiers = model
num = clusterizer.predict(x)
classifier = classifiers[num][0]
classes = classifiers[num][1]
if not (classifier is None):
proba_set = (classifier.predict_proba(x), classes)
probas.append(proba_set)
probas_list.append(probas)
return probas_list
def predict_proba(self, x_test, remove_outliers=False):
#
def calc(px):
if len(px) == 0:
return 0.0
h = max(px)
l = min(px)
s = np.sum(px)
return (s-h-l)/(len(px)-2) if remove_outliers else s/len(px)
#
def calc_probas(ps):
if ps is None or len(ps) == 0:
return None
pred_dics = dict()
for c in self.labels:
pred_dics[c] = list()
for preds_, classes in ps:
for p, c in zip(preds_[0], classes):
pred_dics[c].append(p)
# print(pred_dics)
raw_result = list()
for c in self.labels:
raw_result.append(calc(pred_dics[c]))
s = np.sum(raw_result)
normalized_probas = [x/s for x in raw_result]
return np.array(normalized_probas)
probas_list = self.predict_probas(x_test)
proba_list = [calc_probas(ps) for ps in probas_list]
return np.array(proba_list)
def predict(self, x_test, remove_outliers=False):
proba_list = self.predict_proba(x_test, remove_outliers)
return np.array([self.labels[np.argmax(p)] for p in proba_list])
# investigate the worst performing clusters
def investigate(self, x_test, y_test):
pass
def extract(x_train, y_train, numbers, num):
selected = [i for i, x in enumerate(numbers) if num == x]
conditions = [num == x for x in numbers]
x_train_sub = x_train[selected, :]
y_train_sub = np.extract(conditions, y_train)
return x_train_sub, y_train_sub
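# A hedged sketch of what the unimplemented `investigate` hook above could do
# (an assumption about the author's intent, not part of the original class):
# score each per-cluster classifier on the test points assigned to its cluster
# and return the clusters ordered from worst to best test accuracy.
def investigate_clusters(cc, x_test, y_test):
    reports = []
    for clusterizer, classifiers in cc.models:
        cluster_numbers = clusterizer.predict(x_test)
        for num, (clf, classes, train_acc) in enumerate(classifiers):
            if clf is None:
                continue
            x_sub, y_sub = extract(x_test, y_test, cluster_numbers, num)
            if len(x_sub) > 0:
                reports.append((num, train_acc, clf.score(x_sub, y_sub)))
    return sorted(reports, key=lambda r: r[-1])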
if True:
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
COSINE_SIMILARITY = True
if COSINE_SIMILARITY:
from sklearn.cluster import k_means_
from scipy.spatial.distance import cdist
def new_euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
return cdist(X, Y, 'cosine')
# MONKEY PATCH
k_means_.euclidean_distances = new_euclidean_distances
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.5)
print("ClusteredClassifier")
cc = ClusteredClassifier(KMeans(n_clusters=6),
RandomForestClassifier(n_estimators=50, class_weight='balanced'),
num_iter=3,
verbose=True)
cc.fit(x_train, y_train)
y_pred_train = cc.predict(x_train)
print(classification_report(y_train, y_pred_train))
y_pred_test = cc.predict(x_test)
print(classification_report(y_test, y_pred_test))
print("RandomForest")
rf = RandomForestClassifier(n_estimators=50, class_weight='balanced')
rf.fit(x_train, y_train)
y_pred_train = rf.predict(x_train)
print(classification_report(y_train, y_pred_train))
y_pred_test = rf.predict(x_test)
print(classification_report(y_test, y_pred_test))
| apache-2.0 |
scenarios/tensorflow | tensorflow/examples/how_tos/reading_data/convert_to_records.py | 95 | 3193 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts MNIST data to TFRecords file format with Example protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import mnist
FLAGS = None
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def convert_to(data_set, name):
"""Converts a dataset to tfrecords."""
images = data_set.images
labels = data_set.labels
num_examples = data_set.num_examples
if images.shape[0] != num_examples:
raise ValueError('Images size %d does not match label size %d.' %
(images.shape[0], num_examples))
rows = images.shape[1]
cols = images.shape[2]
depth = images.shape[3]
filename = os.path.join(FLAGS.directory, name + '.tfrecords')
print('Writing', filename)
writer = tf.python_io.TFRecordWriter(filename)
for index in range(num_examples):
image_raw = images[index].tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'height': _int64_feature(rows),
'width': _int64_feature(cols),
'depth': _int64_feature(depth),
'label': _int64_feature(int(labels[index])),
'image_raw': _bytes_feature(image_raw)}))
writer.write(example.SerializeToString())
writer.close()
def main(unused_argv):
# Get the data.
data_sets = mnist.read_data_sets(FLAGS.directory,
dtype=tf.uint8,
reshape=False,
validation_size=FLAGS.validation_size)
# Convert to Examples and write the result to TFRecords.
convert_to(data_sets.train, 'train')
convert_to(data_sets.validation, 'validation')
convert_to(data_sets.test, 'test')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--directory',
type=str,
default='/tmp/data',
help='Directory to download data files and write the converted result'
)
parser.add_argument(
'--validation_size',
type=int,
default=5000,
help="""\
Number of examples to separate from the training data for the validation
set.\
"""
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
ruby-dlib/ruby-dlib | ext/dlib-19.4/python_examples/train_shape_predictor.py | 10 | 5998 | #!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
# This example program shows how to use dlib's implementation of the paper:
# One Millisecond Face Alignment with an Ensemble of Regression Trees by
# Vahid Kazemi and Josephine Sullivan, CVPR 2014
#
# In particular, we will train a face landmarking model based on a small
# dataset and then evaluate it. If you want to visualize the output of the
# trained model on some images then you can run the
# face_landmark_detection.py example program with predictor.dat as the input
# model.
#
# It should also be noted that this kind of model, while often used for face
# landmarking, is quite general and can be used for a variety of shape
# prediction tasks. But here we demonstrate it only on a simple face
# landmarking task.
#
# COMPILING/INSTALLING THE DLIB PYTHON INTERFACE
# You can install dlib using the command:
# pip install dlib
#
# Alternatively, if you want to compile dlib yourself then go into the dlib
# root folder and run:
# python setup.py install
# or
# python setup.py install --yes USE_AVX_INSTRUCTIONS
# if you have a CPU that supports AVX instructions, since this makes some
# things run faster.
#
# Compiling dlib should work on any operating system so long as you have
# CMake and boost-python installed. On Ubuntu, this can be done easily by
# running the command:
# sudo apt-get install libboost-python-dev cmake
#
# Also note that this example requires scikit-image which can be installed
# via the command:
# pip install scikit-image
# Or downloaded from http://scikit-image.org/download.html.
import os
import sys
import glob
import dlib
from skimage import io
# In this example we are going to train a face detector based on the small
# faces dataset in the examples/faces directory. This means you need to supply
# the path to this faces folder as a command line argument so we will know
# where it is.
if len(sys.argv) != 2:
print(
"Give the path to the examples/faces directory as the argument to this "
"program. For example, if you are in the python_examples folder then "
"execute this program by running:\n"
" ./train_shape_predictor.py ../examples/faces")
exit()
faces_folder = sys.argv[1]
options = dlib.shape_predictor_training_options()
# Now make the object responsible for training the model.
# This algorithm has a bunch of parameters you can mess with. The
# documentation for the shape_predictor_trainer explains all of them.
# You should also read Kazemi's paper which explains all the parameters
# in great detail. However, here I'm just setting three of them
# differently than their default values. I'm doing this because we
# have a very small dataset. In particular, setting the oversampling
# to a high amount (300) effectively boosts the training set size, so
# that helps this example.
options.oversampling_amount = 300
# I'm also reducing the capacity of the model by explicitly increasing
# the regularization (making nu smaller) and by using trees with
# smaller depths.
options.nu = 0.05
options.tree_depth = 2
options.be_verbose = True
# dlib.train_shape_predictor() does the actual training. It will save the
# final predictor to predictor.dat. The input is an XML file that lists the
# images in the training dataset and also contains the positions of the face
# parts.
training_xml_path = os.path.join(faces_folder, "training_with_face_landmarks.xml")
dlib.train_shape_predictor(training_xml_path, "predictor.dat", options)
# Now that we have a model we can test it. dlib.test_shape_predictor()
# measures the average distance between a face landmark output by the
# shape_predictor and where it should be according to the truth data.
print("\nTraining accuracy: {}".format(
dlib.test_shape_predictor(training_xml_path, "predictor.dat")))
# The real test is to see how well it does on data it wasn't trained on. We
# trained it on a very small dataset so the accuracy is not extremely high, but
# it's still doing quite good. Moreover, if you train it on one of the large
# face landmarking datasets you will obtain state-of-the-art results, as shown
# in the Kazemi paper.
testing_xml_path = os.path.join(faces_folder, "testing_with_face_landmarks.xml")
print("Testing accuracy: {}".format(
dlib.test_shape_predictor(testing_xml_path, "predictor.dat")))
# Now let's use it as you would in a normal application. First we will load it
# from disk. We also need to load a face detector to provide the initial
# estimate of the facial location.
predictor = dlib.shape_predictor("predictor.dat")
detector = dlib.get_frontal_face_detector()
# Now let's run the detector and shape_predictor over the images in the faces
# folder and display the results.
print("Showing detections and predictions on the images in the faces folder...")
win = dlib.image_window()
for f in glob.glob(os.path.join(faces_folder, "*.jpg")):
print("Processing file: {}".format(f))
img = io.imread(f)
win.clear_overlay()
win.set_image(img)
# Ask the detector to find the bounding boxes of each face. The 1 in the
# second argument indicates that we should upsample the image 1 time. This
# will make everything bigger and allow us to detect more faces.
dets = detector(img, 1)
print("Number of faces detected: {}".format(len(dets)))
for k, d in enumerate(dets):
print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
k, d.left(), d.top(), d.right(), d.bottom()))
# Get the landmarks/parts for the face in box d.
shape = predictor(img, d)
print("Part 0: {}, Part 1: {} ...".format(shape.part(0),
shape.part(1)))
# Draw the face landmarks on the screen.
win.add_overlay(shape)
win.add_overlay(dets)
dlib.hit_enter_to_continue()
| mit |
stefanwebb/tensorflow-models | tensorflow_models/__init__.py | 1 | 11501 | # MIT License
#
# Copyright (c) 2017, Stefan Webb. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys, six
import importlib
from enum import Enum
import tensorflow as tf
import numpy as np
import tensorflow_datasets as tf_data
from tensorflow_datasets.utils.list import wrap
import tensorflow_models.optimizers
import tensorflow_models.contexts
import tensorflow_models.layers
class GraphKeys(object):
INPUTS = 'inputs'
PRIOR = 'prior'
PLACEHOLDERS = 'placeholders'
OUTPUTS = 'outputs'
ENCODERS = 'encoders'
DECODERS = 'decoders'
LOSSES = 'losses'
INFERENCE = 'inference'
# Flatten out the HWC dimensions of a tensor
def flatten(x):
if len(x.shape) > 2:
x = tf.reshape(x, [int(x.shape[0]), -1])
return x
# Sum out the HWC dimensions of a tensor
def reduce_sum(x):
return tf.reduce_sum(flatten(x), 1)
# Return the scale of samples (which is [0, 1] unless transformations have been applied)
def sample_scale(settings):
scale = [0, 1]
if 'transformations' in settings:
for k, v in six.viewitems(settings['transformations']):
if k == 'rescale':
scale = list(v)
return scale
# Gets the shape of the tensor holding an unflattened minibatch => (batch x channels x height x width)
def unflattened_batchshape(settings):
return [settings['batch_size']] + tf_data.unflattened_sample_shape(settings)
def flattened_shape(settings):
return [int(np.prod(tf_data.unflattened_sample_shape(settings)))]
def flattened_batchshape(settings):
return [settings['batch_size']] + flattened_shape(settings)
def batchshape(settings):
if 'flatten' in settings['transformations']:
return flattened_batchshape(settings)
else:
return unflattened_batchshape(settings)
def safe_log(x, **kwargs):
#return tf.log(x + 1e-16, **kwargs)
return tf.log(x + 1e-8, **kwargs)
def count_batches(settings, subset=None):
if not subset is None:
return tf_data.count(settings['dataset'], subset) // settings['batch_size']
else:
train_batches = count_batches(settings, tf_data.Subset.TRAIN)
test_batches = count_batches(settings, tf_data.Subset.TEST)
return train_batches, test_batches
def global_step():
return tf.contrib.framework.get_or_create_global_step()
def local_step(settings, name='local_step', start=0):
	with host():
		step = tf.Variable(start, name=name, trainable=False)
	return step
def host():
return tf.device("/cpu:0")
def device(settings):
return tf.device("/" + settings['device'])
def create(settings, placeholders=False):
with host():
if not placeholders:
input_ops(settings)
else:
input_placeholders(settings)
with device(settings):
model_ops(settings)
loss_ops(settings)
inference_ops(settings)
def trainer(settings):
lib = importlib.import_module('tensorflow_models.trainers.' + settings['trainer'])
return lib.Trainer
# TODO: Would it be better to expand the settings dictionary when it is called and have named arguments?
def input_ops(settings):
with tf.name_scope('inputs/train'):
train_samples = tf_data.inputs(
name=settings['dataset'],
subset=tf_data.Subset.TRAIN,
return_labels=settings['labels'],
batch_size=settings['batch_size'],
num_threads=settings['num_threads'],
transformations=settings['transformations'])
for x in wrap(train_samples):
tf.add_to_collection(GraphKeys.INPUTS, x)
if not settings['model'] == 'gan':
with tf.name_scope('inputs/test'):
test_samples = tf_data.inputs(
name=settings['dataset'],
subset=tf_data.Subset.TEST,
return_labels=settings['labels'],
batch_size=settings['batch_size'],
num_threads=settings['num_threads'],
transformations=settings['transformations'])
for x in wrap(test_samples):
tf.add_to_collection(GraphKeys.INPUTS, x)
#return train_samples, test_samples
def input_placeholders(settings):
#count_train = settings['count'][tf_data.Subset.TRAIN]
#count_test = settings['count'][tf_data.Subset.TEST]
with tf.name_scope('inputs/train'):
#print('sample_shape', sample_shape(settings), unflattened_sample_shape(settings))
train = tf.placeholder(dtype=tf.float32, shape=np.concatenate(([settings['batch_size']], tf_data.sample_shape(settings))), name='samples')
if settings['labels']:
train = [train, tf.placeholder(dtype=tf.float32, shape=[settings['batch_size'], 1], name='labels')]
for x in wrap(train):
tf.add_to_collection(GraphKeys.INPUTS, x)
with tf.name_scope('inputs/test'):
test = tf.placeholder(dtype=tf.float32, shape=np.concatenate(([settings['batch_size']], tf_data.sample_shape(settings))), name='samples')
if settings['labels']:
test = [test, tf.placeholder(dtype=tf.float32, shape=[settings['batch_size'], 1], name='labels')]
for x in wrap(test):
tf.add_to_collection(GraphKeys.INPUTS, x)
#print('train.shape', train.shape, 'test.shape', test.shape)
#print('train', train, 'test', test)
#raise Exception()
return train, test
def samples(subset=tf_data.Subset.TRAIN):
inputs = tf.get_collection(GraphKeys.INPUTS)
for op in inputs:
if tf_data.subset_suffix[subset] + '/samples' in op.name:
return op
return None
def labels(subset=tf_data.Subset.TRAIN):
inputs = tf.get_collection(GraphKeys.INPUTS)
for op in inputs:
if tf_data.subset_suffix[subset] + '/labels' in op.name:
return op
return None
# Get trainable variables with a given substring
def get_vars(name):
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
selected = []
for v in variables:
if name in v.name:
selected.append(v)
	return selected
def get_prior():
ops = tf.get_collection(GraphKeys.PRIOR)
	if ops:
return ops[0]
else:
raise ValueError('No prior sampling operation exists')
def lg_likelihood(settings):
model = importlib.import_module('tensorflow_models.models.' + settings['model'])
if 'lg_likelihood' in dir(model):
return model.lg_likelihood
else:
raise ValueError('No log-likelihood function exists')
def model(settings):
model_module = importlib.import_module('tensorflow_models.models.' + settings['model'])
return model_module
def lg_prior(settings):
model = importlib.import_module('tensorflow_models.models.' + settings['model'])
if 'lg_prior' in dir(model):
return model.lg_prior
else:
raise ValueError('No log-prior function exists')
def get_decoder():
ops = tf.get_collection(GraphKeys.DECODERS)
	if ops:
return ops[0]
else:
return None
#raise ValueError('No decoder sampling operation exists')
def get_encoder():
ops = tf.get_collection(GraphKeys.ENCODERS)
	if ops:
return ops[0]
else:
return None
#raise ValueError('No encoder sampling operation exists')
def get_output(name):
ops = tf.get_collection(GraphKeys.OUTPUTS)
for op in ops:
if name in op.name:
return op
raise ValueError('No output operation with substring "{}" exists'.format(name))
def get_loss(name):
ops = tf.get_collection(GraphKeys.LOSSES)
for op in ops:
if name in op.name:
return op
raise ValueError('No loss operation with substring "{}" exists'.format(name))
def get_inference(name):
ops = tf.get_collection(GraphKeys.INFERENCE)
for op in ops:
if name in op.name:
return op
raise ValueError('No inference operation with substring "{}" exists'.format(name))
def samples_placeholder():
placeholders = tf.get_collection(GraphKeys.PLACEHOLDERS)
for p in placeholders:
if 'samples' in p.name:
return p
return None
def train_placeholder():
placeholders = tf.get_collection(GraphKeys.INPUTS)
for p in placeholders:
if 'train/samples' in p.name:
return p
return None
def test_placeholder():
placeholders = tf.get_collection(GraphKeys.INPUTS)
for p in placeholders:
if 'test/samples' in p.name:
return p
return None
def codes_placeholder():
placeholders = tf.get_collection(GraphKeys.PLACEHOLDERS)
for p in placeholders:
if 'codes' in p.name:
return p
return None
def model_ops(settings):
model = importlib.import_module('tensorflow_models.models.' + settings['model'])
with tf.variable_scope('model'):
# Create and store an operation to sample from the prior
if 'create_prior' in dir(model):
with tf.name_scope('prior'):
tf.add_to_collection(GraphKeys.PRIOR, model.create_prior(settings))
if 'create_placeholders' in dir(model):
with tf.name_scope('placeholders'):
placeholders = model.create_placeholders(settings)
for p in wrap(placeholders):
tf.add_to_collection(GraphKeys.PLACEHOLDERS, p)
with tf.name_scope('train'):
probs = model.create_probs(settings, samples(tf_data.Subset.TRAIN), is_training=True)
for p in wrap(probs):
tf.add_to_collection(GraphKeys.OUTPUTS, p)
if not settings['model'] == 'gan':
with tf.name_scope('test'):
probs = model.create_probs(settings, samples(tf_data.Subset.TEST), is_training=False, reuse=True)
for p in wrap(probs):
tf.add_to_collection(GraphKeys.OUTPUTS, p)
if 'create_encoder' in dir(model):
tf.add_to_collection(GraphKeys.ENCODERS, model.create_encoder(settings, reuse=True))
if 'create_decoder' in dir(model):
tf.add_to_collection(GraphKeys.DECODERS, model.create_decoder(settings, reuse=True))
def loss_ops(settings):
loss_lib = importlib.import_module('tensorflow_models.losses.' + settings['loss'])
with tf.name_scope('losses'):
if not settings['model'] == 'gan':
ls = wrap(loss_lib.create('train', settings)) + wrap(loss_lib.create('test', settings))
else:
ls = wrap(loss_lib.create('train', settings))
for l in ls:
tf.add_to_collection(GraphKeys.LOSSES, l)
def inference_ops(settings):
inference_lib = importlib.import_module('tensorflow_models.inference.' + settings['inference'])
with tf.name_scope('inference'):
ops = wrap(inference_lib.create(settings))
for op in ops:
tf.add_to_collection(GraphKeys.INFERENCE, op)
def latentshape(settings):
return [settings['batch_size'], settings['latent_dimension']]
def noiseshape(settings):
return [settings['batch_size'], settings['noise_dimension']]
def standard_normal(shape, name='MultivariateNormalDiag'):
return tf.contrib.distributions.MultivariateNormalDiag(tf.zeros(shape), tf.ones(shape), name=name)
def standard_uniform(name='Uniform'):
return tf.contrib.distributions.Uniform(name=name)
def gan_uniform(name='Uniform'):
try:
return tf.contrib.distributions.Uniform(a=-1., b=1., name=name)
	except TypeError:  # older tf.contrib builds take a/b instead of low/high
return tf.contrib.distributions.Uniform(low=-1., high=1., name=name)
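# Illustrative sketch (not part of the module) of how the helpers above are
# typically combined; 'settings' is assumed to carry 'batch_size',
# 'latent_dimension' and 'noise_dimension' keys as elsewhere in this file:
#
#   z_prior = standard_normal(latentshape(settings)).sample()
#   noise = gan_uniform().sample(noiseshape(settings))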
| mit |
Bashar/django | tests/generic_views/test_list.py | 12 | 10532 | from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings
from django.views.generic.base import View
from django.utils.encoding import force_str
from .models import Author, Artist
@override_settings(ROOT_URLCONF='generic_views.urls')
class ListViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
def test_items(self):
res = self.client.get('/list/dict/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/list.html')
self.assertEqual(res.context['object_list'][0]['first'], 'John')
def test_queryset(self):
res = self.client.get('/list/authors/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIsInstance(res.context['view'], View)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertIsNone(res.context['paginator'])
self.assertIsNone(res.context['page_obj'])
self.assertFalse(res.context['is_paginated'])
def test_paginated_queryset(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTrue(res.context['is_paginated'])
self.assertEqual(res.context['page_obj'].number, 1)
self.assertEqual(res.context['paginator'].num_pages, 4)
self.assertEqual(res.context['author_list'][0].name, 'Author 00')
self.assertEqual(list(res.context['author_list'])[-1].name, 'Author 29')
def test_paginated_queryset_shortdata(self):
# Test that short datasets ALSO result in a paginated view.
res = self.client.get('/list/authors/paginated/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['page_obj'].number, 1)
self.assertEqual(res.context['paginator'].num_pages, 1)
self.assertFalse(res.context['is_paginated'])
def test_paginated_get_page_by_query_string(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/', {'page': '2'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 30')
self.assertEqual(res.context['page_obj'].number, 2)
def test_paginated_get_last_page_by_query_string(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/', {'page': 'last'})
self.assertEqual(res.status_code, 200)
self.assertEqual(len(res.context['object_list']), 10)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 90')
self.assertEqual(res.context['page_obj'].number, 4)
def test_paginated_get_page_by_urlvar(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/3/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 60')
self.assertEqual(res.context['page_obj'].number, 3)
def test_paginated_page_out_of_range(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/42/')
self.assertEqual(res.status_code, 404)
def test_paginated_invalid_page(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/?page=frog')
self.assertEqual(res.status_code, 404)
def test_paginated_custom_paginator_class(self):
self._make_authors(7)
res = self.client.get('/list/authors/paginated/custom_class/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['paginator'].num_pages, 1)
# Custom pagination allows for 2 orphans on a page size of 5
self.assertEqual(len(res.context['object_list']), 7)
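    # For reference, a minimal sketch of the orphan behaviour exercised above
    # (per_page/orphans assumed to match the test URL configuration):
    #
    #   from django.core.paginator import Paginator
    #   Paginator(range(7), per_page=5, orphans=2).num_pages   # -> 1
    #   Paginator(range(8), per_page=5, orphans=2).num_pages   # -> 2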
def test_paginated_custom_page_kwarg(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/custom_page_kwarg/', {'pagina': '2'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 30')
self.assertEqual(res.context['page_obj'].number, 2)
def test_paginated_custom_paginator_constructor(self):
self._make_authors(7)
res = self.client.get('/list/authors/paginated/custom_constructor/')
self.assertEqual(res.status_code, 200)
# Custom pagination allows for 2 orphans on a page size of 5
self.assertEqual(len(res.context['object_list']), 7)
def test_paginated_orphaned_queryset(self):
self._make_authors(92)
res = self.client.get('/list/authors/paginated-orphaned/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 1)
res = self.client.get(
'/list/authors/paginated-orphaned/', {'page': 'last'})
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 3)
res = self.client.get(
'/list/authors/paginated-orphaned/', {'page': '3'})
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 3)
res = self.client.get(
'/list/authors/paginated-orphaned/', {'page': '4'})
self.assertEqual(res.status_code, 404)
def test_paginated_non_queryset(self):
res = self.client.get('/list/dict/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(len(res.context['object_list']), 1)
def test_verbose_name(self):
res = self.client.get('/list/artists/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/list.html')
self.assertEqual(list(res.context['object_list']), list(Artist.objects.all()))
self.assertIs(res.context['artist_list'], res.context['object_list'])
self.assertIsNone(res.context['paginator'])
self.assertIsNone(res.context['page_obj'])
self.assertFalse(res.context['is_paginated'])
def test_allow_empty_false(self):
res = self.client.get('/list/authors/notempty/')
self.assertEqual(res.status_code, 200)
Author.objects.all().delete()
res = self.client.get('/list/authors/notempty/')
self.assertEqual(res.status_code, 404)
def test_template_name(self):
res = self.client.get('/list/authors/template_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTemplateUsed(res, 'generic_views/list.html')
def test_template_name_suffix(self):
res = self.client.get('/list/authors/template_name_suffix/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTemplateUsed(res, 'generic_views/author_objects.html')
def test_context_object_name(self):
res = self.client.get('/list/authors/context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertNotIn('authors', res.context)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTemplateUsed(res, 'generic_views/author_list.html')
def test_duplicate_context_object_name(self):
res = self.client.get('/list/authors/dupe_context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertNotIn('authors', res.context)
self.assertNotIn('author_list', res.context)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
def test_missing_items(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/list/authors/invalid/')
def test_paginated_list_view_does_not_load_entire_table(self):
# Regression test for #17535
self._make_authors(3)
# 1 query for authors
with self.assertNumQueries(1):
self.client.get('/list/authors/notempty/')
# same as above + 1 query to test if authors exist + 1 query for pagination
with self.assertNumQueries(3):
self.client.get('/list/authors/notempty/paginated/')
@override_settings(DEBUG=True)
def test_paginated_list_view_returns_useful_message_on_invalid_page(self):
# test for #19240
# tests that source exception's message is included in page
self._make_authors(1)
res = self.client.get('/list/authors/paginated/2/')
self.assertEqual(res.status_code, 404)
self.assertEqual(force_str(res.context.get('reason')),
"Invalid page (2): That page contains no results")
def _make_authors(self, n):
Author.objects.all().delete()
for i in range(n):
Author.objects.create(name='Author %02i' % i, slug='a%s' % i)
| bsd-3-clause |
mrcslws/nupic.research | packages/lightning/src/nupic/research/frameworks/lightning/models/supervised_model.py | 3 | 8065 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2020, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import pytorch_lightning as pl
import torch
from torch.backends import cudnn
from torch.nn.modules.batchnorm import _BatchNorm
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader
from nupic.research.frameworks.pytorch.lr_scheduler import ComposedLRScheduler
from nupic.research.frameworks.vernon.network_utils import create_model
__all__ = [
"SupervisedModel",
]
# Improves performance when using fixed size images (224) and CNN
cudnn.benchmark = True
class SupervisedModel(pl.LightningModule):
"""
General experiment class used to train neural networks in supervised
learning tasks.
"""
trainer_requirements = dict(
automatic_optimization=False, # Required for complexity_loss
)
def __init__(self, config):
super().__init__()
self.config = config
self._loss_function = config.get(
"loss_function", torch.nn.functional.cross_entropy
)
self.model = create_model(
model_class=config["model_class"],
model_args=config.get("model_args", {}),
init_batch_norm=config.get("init_batch_norm", False),
checkpoint_file=config.get("checkpoint_file", None),
load_checkpoint_args=config.get("load_checkpoint_args", {}),
)
self.epochs = config["epochs"]
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
optimizer = self.optimizers()
x, target = batch
output = self(x)
loss = self.error_loss(output, target)
self.log("train_loss", loss
# Flag that makes sense but slows things down:
# , sync_dist=True
# TODO: understand implications.
)
self.manual_backward(loss, optimizer)
complexity_loss = self.complexity_loss()
if complexity_loss is not None:
self.log("complexity_loss", complexity_loss)
self.manual_backward(complexity_loss, optimizer)
optimizer.step()
optimizer.zero_grad()
def validation_step(self, batch, batch_idx):
x, target = batch
out = self(x)
loss = self.error_loss(out, target)
pred = torch.argmax(out, dim=1)
val_acc = torch.sum(pred == target).float() / len(target)
# TODO: Logging these every step may be wasteful.
self.log("val_loss", loss, sync_dist=True)
self.log("val_acc", val_acc, sync_dist=True)
return loss
def configure_optimizers(self):
group_decay, group_no_decay = [], []
for module in self.model.modules():
for name, param in module.named_parameters(recurse=False):
if self.should_decay_parameter(module, name, param, self.config):
group_decay.append(param)
else:
group_no_decay.append(param)
optimizer_class = self.config.get("optimizer_class", torch.optim.SGD)
optimizer_args = self.config.get("optimizer_args", {})
optimizer = optimizer_class([dict(params=group_decay),
dict(params=group_no_decay,
weight_decay=0.)],
**optimizer_args)
lr_scheduler_class = self.config.get("lr_scheduler_class", None)
if lr_scheduler_class is not None:
lr_scheduler_args = self.config.get("lr_scheduler_args", {})
lr_scheduler_args = self.expand_lr_scheduler_args(
lr_scheduler_class, lr_scheduler_args)
lr_scheduler = lr_scheduler_class(optimizer,
**lr_scheduler_args)
if (self.config.get("lr_scheduler_step_every_batch", False)
or isinstance(lr_scheduler, (OneCycleLR, ComposedLRScheduler))):
lr_scheduler = dict(scheduler=lr_scheduler, interval="step")
return [optimizer], [lr_scheduler]
else:
return [optimizer]
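    # A hypothetical config fragment (keys and values chosen for illustration,
    # not taken from an actual experiment) that drives the hooks above:
    #
    #   config = dict(
    #       model_class=MyModel,              # assumed user-defined nn.Module
    #       epochs=10,
    #       dataset_class=MyDataset,          # assumed, see load_dataset()
    #       optimizer_class=torch.optim.SGD,
    #       optimizer_args=dict(lr=0.1, momentum=0.9, weight_decay=1e-4),
    #       lr_scheduler_class=OneCycleLR,
    #       lr_scheduler_args=dict(max_lr=0.1, epochs=10),
    #   )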
def setup(self, stage):
self.train_dataset = self.load_dataset(self.config, train=True)
self.val_dataset = self.load_dataset(self.config, train=False)
def train_dataloader(self):
return self.create_train_loader(self.current_epoch)
def val_dataloader(self):
return DataLoader(
dataset=self.val_dataset,
batch_size=self.config.get("val_batch_size",
self.config.get("batch_size", 1)),
num_workers=self.config.get("workers", 0),
pin_memory=torch.cuda.is_available(),
)
#
# Utility methods
#
def expand_lr_scheduler_args(self, lr_scheduler_class, lr_scheduler_args):
"""
Return a new lr_scheduler_args with extra args inserted.
:param lr_scheduler_class: Class of lr-scheduler
:param lr_scheduler_args: User-specified args
:return: New lr_scheduler_args
"""
if issubclass(lr_scheduler_class, OneCycleLR):
# Update OneCycleLR parameters
epochs = lr_scheduler_args["epochs"]
lr_scheduler_args = {
**lr_scheduler_args,
"total_steps": sum(self.compute_steps_in_epoch(epoch)
for epoch in range(epochs)),
}
return lr_scheduler_args
def compute_steps_in_epoch(self, epoch):
"""
Get the number of optimizer steps in a given epoch.
:param epoch: Epoch number
:return: Number of optimizer steps
"""
return len(self.create_train_loader(epoch))
@classmethod
def load_dataset(cls, config, train=True):
dataset_class = config.get("dataset_class", None)
if dataset_class is None:
raise ValueError("Must specify 'dataset_class' in config.")
dataset_args = config.get("dataset_args", {})
dataset_args.update(train=train)
return dataset_class(**dataset_args)
def create_train_loader(self, epoch):
return DataLoader(
dataset=self.train_dataset,
batch_size=self.config.get("batch_size", 1),
num_workers=self.config.get("workers", 0),
pin_memory=torch.cuda.is_available(),
drop_last=self.config.get("train_loader_drop_last", True),
)
def should_decay_parameter(self, module, parameter_name, parameter, config):
if isinstance(module, _BatchNorm):
return config.get("batch_norm_weight_decay", True)
elif parameter_name == "bias":
return config.get("bias_weight_decay", True)
else:
return True
def error_loss(self, output, target, reduction="mean"):
"""
The error loss component of the loss function.
"""
return self._loss_function(output, target, reduction=reduction)
def complexity_loss(self):
"""
The model complexity component of the loss function.
"""
pass
| agpl-3.0 |
mikemccann/stoqs | loaders/IOOS/load_gliders.py | 5 | 3031 | #!/usr/bin/env python
__author__ = 'Mike McCann'
__copyright__ = '2013'
__license__ = 'GPL v3'
__contact__ = 'mccann at mbari.org'
__doc__ = '''
Loader for IOOS Glider DAC
Mike McCann
MBARI 22 April 2014
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
import os
import sys
parentDir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parentDir) # So that IOOS and DAPloaders are found
import logging
import datetime
from IOOS import IOOSLoader
from DAPloaders import runGliderLoader
from thredds_crawler.crawl import Crawl
logger = logging.getLogger('__main__')
il = IOOSLoader('stoqs_ioos_gliders', 'IOOS Gliders',
description = 'Glider data from the Integrated Ocean Observing System Glider DAC',
x3dTerrains = {
'http://dods.mbari.org/terrain/x3d/Globe_1m_bath_10x/Globe_1m_bath_10x_scene.x3d': {
'position': '14051448.48336 -15407886.51486 6184041.22775',
'orientation': '0.83940 0.33030 0.43164 1.44880',
'centerOfRotation': '0 0 0',
'VerticalExaggeration': '10',
}
},
grdTerrain = os.path.join(parentDir, 'Globe_1m_bath.grd')
)
il.parms = ['temperature', 'salinity', 'density']
# Start and end dates of None will load entire archive
il.startDatetime = None
il.endDatetime = None
def loadGliders(loader, stride=1):
'''
Crawl the IOOS Glider TDS for OPeNDAP links of Time aggregated files and load into STOQS
'''
c = Crawl("http://tds.gliders.ioos.us/thredds/catalog.xml", select=[".*_Time$"])
urls = [s.get("url") for d in c.datasets for s in d.services if s.get("service").lower() == "opendap"]
colors = loader.colors.values()
for url in urls:
aName = url.split('/')[-1].split('.')[0]
pName = aName.replace('_Time', '')
if pName.find('-') != -1:
logger.warn("Replacing '-' characters in platform name %s with '_'s", pName)
pName = pName.replace('-', '_')
logger.info("Executing runGliderLoader with url = %s", url)
try:
runGliderLoader(url, loader.campaignName, il.campaignDescription, aName, pName, colors.pop(), 'glider', 'Glider Mission',
loader.parms, loader.dbAlias, stride, loader.startDatetime, loader.endDatetime, il.grdTerrain)
except Exception, e:
logger.error('%s. Skipping this dataset.', e)
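# Worked example of the name munging above with a hypothetical catalog entry:
# a URL ending in 'ru29-20140101T0000_Time.nc' yields
# aName = 'ru29-20140101T0000_Time' and pName = 'ru29_20140101T0000'.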
# Execute the load
il.process_command_line()
if il.args.test:
loadGliders(il, stride=100)
elif il.args.optimal_stride:
loadGliders(il, stride=10)
else:
loadGliders(il, stride=il.args.stride)
# Add any X3D Terrain information specified in the constructor to the database - must be done after a load is executed
il.addTerrainResources()
print "All Done."
| gpl-3.0 |
t0in4/django | tests/gis_tests/test_geoip2.py | 21 | 5666 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import unittest
from unittest import skipUnless
from django.conf import settings
from django.contrib.gis.geoip2 import HAS_GEOIP2
from django.contrib.gis.geos import HAS_GEOS, GEOSGeometry
from django.utils import six
if HAS_GEOIP2:
from django.contrib.gis.geoip2 import GeoIP2, GeoIP2Exception
# Note: Requires both the GeoIP country and city datasets.
# The GEOIP_PATH setting should be the only setting set (the directory
# should contain links or the actual database files 'GeoLite2-City.mmdb' and
# 'GeoLite2-Country.mmdb').
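# For reference, a hypothetical settings fragment that satisfies the guard
# below (the path is arbitrary):
#
#   GEOIP_PATH = '/path/to/geoip2/databases'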
@skipUnless(HAS_GEOIP2 and getattr(settings, "GEOIP_PATH", None),
"GeoIP is required along with the GEOIP_PATH setting.")
class GeoIPTest(unittest.TestCase):
addr = '128.249.1.1'
fqdn = 'tmc.edu'
def test01_init(self):
"GeoIP initialization."
g1 = GeoIP2() # Everything inferred from GeoIP path
path = settings.GEOIP_PATH
g2 = GeoIP2(path, 0) # Passing in data path explicitly.
g3 = GeoIP2.open(path, 0) # MaxMind Python API syntax.
for g in (g1, g2, g3):
self.assertTrue(g._country)
self.assertTrue(g._city)
# Only passing in the location of one database.
city = os.path.join(path, 'GeoLite2-City.mmdb')
cntry = os.path.join(path, 'GeoLite2-Country.mmdb')
g4 = GeoIP2(city, country='')
self.assertIsNone(g4._country)
g5 = GeoIP2(cntry, city='')
self.assertIsNone(g5._city)
# Improper parameters.
bad_params = (23, 'foo', 15.23)
for bad in bad_params:
self.assertRaises(GeoIP2Exception, GeoIP2, cache=bad)
if isinstance(bad, six.string_types):
e = GeoIP2Exception
else:
e = TypeError
self.assertRaises(e, GeoIP2, bad, 0)
def test02_bad_query(self):
"GeoIP query parameter checking."
cntry_g = GeoIP2(city='<foo>')
# No city database available, these calls should fail.
self.assertRaises(GeoIP2Exception, cntry_g.city, 'tmc.edu')
self.assertRaises(GeoIP2Exception, cntry_g.coords, 'tmc.edu')
# Non-string query should raise TypeError
self.assertRaises(TypeError, cntry_g.country_code, 17)
self.assertRaises(TypeError, cntry_g.country_name, GeoIP2)
def test03_country(self):
"GeoIP country querying methods."
g = GeoIP2(city='<foo>')
for query in (self.fqdn, self.addr):
self.assertEqual(
'US',
g.country_code(query),
'Failed for func country_code and query %s' % query
)
self.assertEqual(
'United States',
g.country_name(query),
'Failed for func country_name and query %s' % query
)
self.assertEqual(
{'country_code': 'US', 'country_name': 'United States'},
g.country(query)
)
@skipUnless(HAS_GEOS, "Geos is required")
def test04_city(self):
"GeoIP city querying methods."
g = GeoIP2(country='<foo>')
for query in (self.fqdn, self.addr):
# Country queries should still work.
self.assertEqual(
'US',
g.country_code(query),
'Failed for func country_code and query %s' % query
)
self.assertEqual(
'United States',
g.country_name(query),
'Failed for func country_name and query %s' % query
)
self.assertEqual(
{'country_code': 'US', 'country_name': 'United States'},
g.country(query)
)
# City information dictionary.
d = g.city(query)
self.assertEqual('US', d['country_code'])
self.assertEqual('Houston', d['city'])
self.assertEqual('TX', d['region'])
geom = g.geos(query)
self.assertIsInstance(geom, GEOSGeometry)
lon, lat = (-95.4010, 29.7079)
lat_lon = g.lat_lon(query)
lat_lon = (lat_lon[1], lat_lon[0])
for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
self.assertAlmostEqual(lon, tup[0], 4)
self.assertAlmostEqual(lat, tup[1], 4)
def test05_unicode_response(self):
"GeoIP strings should be properly encoded (#16553)."
g = GeoIP2()
d = g.city("duesseldorf.de")
self.assertEqual('Düsseldorf', d['city'])
d = g.country('200.26.205.1')
# Some databases have only unaccented countries
self.assertIn(d['country_name'], ('Curaçao', 'Curacao'))
def test06_ipv6_query(self):
"GeoIP can lookup IPv6 addresses."
g = GeoIP2()
d = g.city('2002:81ed:c9a5::81ed:c9a5') # IPv6 address for www.nhm.ku.edu
self.assertEqual('US', d['country_code'])
self.assertEqual('Lawrence', d['city'])
self.assertEqual('KS', d['region'])
def test_repr(self):
path = settings.GEOIP_PATH
g = GeoIP2(path=path)
meta = g._reader.metadata()
version = '%s.%s' % (meta.binary_format_major_version, meta.binary_format_minor_version)
country_path = g._country_file
city_path = g._city_file
expected = '<GeoIP2 [v%(version)s] _country_file="%(country)s", _city_file="%(city)s">' % {
'version': version,
'country': country_path,
'city': city_path,
}
self.assertEqual(repr(g), expected)
| bsd-3-clause |
waynenilsen/statsmodels | statsmodels/datasets/copper/data.py | 28 | 2316 | """World Copper Prices 1951-1975 dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = "World Copper Market 1951-1975 Dataset"
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """World Copper Market 1951-1975"""
DESCRLONG = """This data describes the world copper market from 1951 through 1975. In an
example, in Gill, the outcome variable (of a 2 stage estimation) is the world
consumption of copper for the 25 years. The explanatory variables are the
world consumption of copper in 1000 metric tons, the constant dollar adjusted
price of copper, the price of a substitute, aluminum, an index of real per
capita income base 1970, an annual measure of manufacturer inventory change,
and a time trend.
"""
NOTE = """
Number of Observations - 25
Number of Variables - 6
Variable name definitions::
WORLDCONSUMPTION - World consumption of copper (in 1000 metric tons)
COPPERPRICE - Constant dollar adjusted price of copper
INCOMEINDEX - An index of real per capita income (base 1970)
ALUMPRICE - The price of aluminum
INVENTORYINDEX - A measure of annual manufacturer inventory trend
TIME - A time trend
Years are included in the data file though not returned by load.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the copper data and returns a Dataset class.
Returns
--------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/copper.csv', 'rb'), delimiter=",",
names=True, dtype=float, usecols=(1,2,3,4,5,6))
return data
def load_pandas():
"""
Load the copper data and returns a Dataset class.
Returns
--------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
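# Minimal usage sketch (assumes this module is importable as
# statsmodels.datasets.copper):
#
#   from statsmodels.datasets import copper
#   data = copper.load_pandas()
#   data.endog.head()   # WORLDCONSUMPTION
#   data.exog.head()    # the five remaining explanatory variables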
| bsd-3-clause |
transedward/ml-playground | reinforcement/reinforce_baseline.py | 1 | 5380 | import numpy as np
from itertools import count
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
from torch.autograd import Variable
from utils import plotting
class PolicyEstimator(nn.Module):
"""
Policy Function approximator.
"""
def __init__(self, D_in, D_out, hidden_size = 128):
super(PolicyEstimator, self).__init__()
# define network structure
self.W1 = nn.Linear(D_in, hidden_size)
self.W2 = nn.Linear(hidden_size, D_out)
def forward(self, state):
h = F.relu(self.W1(state))
action_scores = self.W2(h)
return F.softmax(action_scores)
class ValueEstimator(nn.Module):
"""
Value Function approximator.
"""
def __init__(self, D_in, hidden_size = 128):
super(ValueEstimator, self).__init__()
# define network structure
self.W1 = nn.Linear(D_in, hidden_size)
# output a score
self.W2 = nn.Linear(hidden_size, 1)
def forward(self, state):
h = F.relu(self.W1(state))
state_values = self.W2(h)
return state_values
def discount_rewards(rewards, gamma):
"""
take 1D float array of rewards and compute discounted reward
Reference: https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5#file-pg-pong-py-L18
"""
discounted_rewards = np.zeros_like(rewards)
running_add = 0
for t in reversed(range(len(rewards))):
running_add = running_add * gamma + rewards[t]
discounted_rewards[t] = running_add
return discounted_rewards
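# Quick worked check (illustrative numbers only): with rewards [1.0, 0.0, 1.0]
# and gamma = 0.5 the loop above runs back-to-front:
#   G_2 = 1.0;  G_1 = 0.0 + 0.5 * 1.0 = 0.5;  G_0 = 1.0 + 0.5 * 0.5 = 1.25
# so discount_rewards([1.0, 0.0, 1.0], 0.5) -> array([1.25, 0.5, 1.0])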
def reinforce_baseline(env, policy_estimator, policy_optimizer, value_estimator, value_optimizer,
num_episodes, discount_factor=1.0, render=True):
"""
REINFORCE (Monte Carlo Policy Gradient) Algorithm with Baseline.
Optimizes the policy function approximator using policy gradient.
Args:
env: OpenAI environment.
policy_estimator: Policy Function to be optimized
policy_optimizer: Optimizer for Policy Function
value_estimator: Value function approximator, used as a baseline
value_optimizer: Optimizer for Value Function
num_episodes: Number of episodes to run for
discount_factor: Time-discount factor
render: Render the training process or not
Returns:
An EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.
"""
running_reward = 0
# Keeps track of useful statistics
stats = plotting.EpisodeStats(
episode_lengths=np.zeros(num_episodes),
episode_rewards=np.zeros(num_episodes))
for i_episode in range(num_episodes):
episode_actions = []
episode_rewards = []
episode_baselines = []
state = env.reset()
for t in count(1):
state = torch.from_numpy(state).float().unsqueeze(0)
# Calculate the probability distribution of actions
probs = policy_estimator(Variable(state))
# Select action by distribution estimated above
action = probs.multinomial()
# Calculate state value as baseline
baseline = value_estimator(Variable(state))
state, reward, done, _ = env.step(action.data[0, 0])
if render:
env.render()
# Keep track of visited action, reward and baseline for later update
episode_actions.append(action)
episode_rewards.append(reward)
episode_baselines.append(baseline)
# update statistics
stats.episode_rewards[i_episode] += reward
stats.episode_lengths[i_episode] = t
if done:
break
# start updating policy and value estimator
discount_rs = discount_rewards(episode_rewards, discount_factor)
# standardize the rewards to be unit normal (helps control the gradient estimator variance)
discount_rs -= discount_rs.mean()
discount_rs /= discount_rs.std()
        # define criterion and calculate loss for the value function
value_target = Variable(torch.Tensor(discount_rs), requires_grad=False)
value_predict = torch.cat(episode_baselines)
value_loss = F.smooth_l1_loss(value_predict, value_target)
# Registers a reward obtained as a result of a stochastic process.
# Differentiating stochastic nodes requires providing them with reward value.
for baseline, action, r in zip(episode_baselines, episode_actions, discount_rs):
action.reinforce(r - baseline.data)
# Remove gradient from previous steps
policy_optimizer.zero_grad()
value_optimizer.zero_grad()
# Perform backward pass
torch.cat(episode_actions).backward()
value_loss.backward()
# Use optimizer to update
policy_optimizer.step()
value_optimizer.step()
# Book-keep the running reward
running_reward = running_reward * 0.99 + sum(episode_rewards) * 0.01
if i_episode % 10 == 0:
print('Episode {}\tRunning reward: {:.2f}'.format(i_episode, running_reward))
if running_reward > 200:
print("Solved! Running reward is now {} and " \
"the last episode runs to {} time steps!".format(running_reward, t))
break
return stats
| mit |
pyannote/pyannote-audio | pyannote/audio/pipelines/speaker_verification.py | 1 | 19476 | # MIT License
#
# Copyright (c) 2021 CNRS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import warnings
try:
from functools import cached_property
except ImportError:
from backports.cached_property import cached_property
from typing import Text, Union
import numpy as np
import torch
import torch.nn.functional as F
import torchaudio
from torch.nn.utils.rnn import pad_sequence
from pyannote.audio import Inference, Model, Pipeline
from pyannote.audio.core.io import AudioFile
from pyannote.audio.core.model import CACHE_DIR
from pyannote.audio.pipelines.utils import PipelineModel, get_devices, get_model
backend = torchaudio.get_audio_backend()
try:
from speechbrain.pretrained import (
EncoderClassifier as SpeechBrain_EncoderClassifier,
)
SPEECHBRAIN_IS_AVAILABLE = True
except ImportError:
SPEECHBRAIN_IS_AVAILABLE = False
finally:
torchaudio.set_audio_backend(backend)
try:
from nemo.collections.asr.models import (
EncDecSpeakerLabelModel as NeMo_EncDecSpeakerLabelModel,
)
NEMO_IS_AVAILABLE = True
except ImportError:
NEMO_IS_AVAILABLE = False
class NeMoPretrainedSpeakerEmbedding:
def __init__(
self,
embedding: Text = "nvidia/speakerverification_en_titanet_large",
device: torch.device = None,
):
if not NEMO_IS_AVAILABLE:
raise ImportError(
f"'NeMo' must be installed to use '{embedding}' embeddings. "
"Visit https://nvidia.github.io/NeMo/ for installation instructions."
)
super().__init__()
self.embedding = embedding
self.device = device
self.model_ = NeMo_EncDecSpeakerLabelModel.from_pretrained(self.embedding)
self.model_.freeze()
self.model_.to(self.device)
@cached_property
def sample_rate(self) -> int:
return self.model_._cfg.train_ds.get("sample_rate", 16000)
@cached_property
def dimension(self) -> int:
input_signal = torch.rand(1, self.sample_rate).to(self.device)
input_signal_length = torch.tensor([self.sample_rate]).to(self.device)
_, embeddings = self.model_(
input_signal=input_signal, input_signal_length=input_signal_length
)
_, dimension = embeddings.shape
return dimension
@cached_property
def metric(self) -> str:
return "cosine"
@cached_property
def min_num_samples(self) -> int:
lower, upper = 2, round(0.5 * self.sample_rate)
middle = (lower + upper) // 2
while lower + 1 < upper:
try:
input_signal = torch.rand(1, middle).to(self.device)
input_signal_length = torch.tensor([middle]).to(self.device)
_ = self.model_(
input_signal=input_signal, input_signal_length=input_signal_length
)
upper = middle
except RuntimeError:
lower = middle
middle = (lower + upper) // 2
return upper
def __call__(
self, waveforms: torch.Tensor, masks: torch.Tensor = None
) -> np.ndarray:
"""
Parameters
----------
waveforms : (batch_size, num_channels, num_samples)
Only num_channels == 1 is supported.
masks : (batch_size, num_samples), optional
Returns
-------
embeddings : (batch_size, dimension)
"""
batch_size, num_channels, num_samples = waveforms.shape
assert num_channels == 1
waveforms = waveforms.squeeze(dim=1)
if masks is None:
signals = waveforms.squeeze(dim=1)
wav_lens = signals.shape[1] * torch.ones(batch_size)
else:
batch_size_masks, _ = masks.shape
assert batch_size == batch_size_masks
# TODO: speed up the creation of "signals"
# preliminary profiling experiments show
# that it accounts for 15% of __call__
# (the remaining 85% being the actual forward pass)
imasks = F.interpolate(
masks.unsqueeze(dim=1), size=num_samples, mode="nearest"
).squeeze(dim=1)
imasks = imasks > 0.5
signals = pad_sequence(
[waveform[imask] for waveform, imask in zip(waveforms, imasks)],
batch_first=True,
)
wav_lens = imasks.sum(dim=1)
max_len = wav_lens.max()
# corner case: every signal is too short
if max_len < self.min_num_samples:
return np.NAN * np.zeros((batch_size, self.dimension))
too_short = wav_lens < self.min_num_samples
wav_lens[too_short] = max_len
_, embeddings = self.model_(
            input_signal=signals.to(self.device),  # masked/padded signals built above
input_signal_length=wav_lens.to(self.device),
)
embeddings = embeddings.cpu().numpy()
embeddings[too_short.cpu().numpy()] = np.NAN
return embeddings
class SpeechBrainPretrainedSpeakerEmbedding:
"""Pretrained SpeechBrain speaker embedding
Parameters
----------
embedding : str
Name of SpeechBrain model
device : torch.device, optional
Device
use_auth_token : str, optional
When loading private huggingface.co models, set `use_auth_token`
        to True or to a string containing your huggingface.co authentication
token that can be obtained by running `huggingface-cli login`
Usage
-----
>>> get_embedding = SpeechBrainPretrainedSpeakerEmbedding("speechbrain/spkrec-ecapa-voxceleb")
>>> assert waveforms.ndim == 3
>>> batch_size, num_channels, num_samples = waveforms.shape
>>> assert num_channels == 1
>>> embeddings = get_embedding(waveforms)
>>> assert embeddings.ndim == 2
>>> assert embeddings.shape[0] == batch_size
>>> assert binary_masks.ndim == 1
>>> assert binary_masks.shape[0] == batch_size
>>> embeddings = get_embedding(waveforms, masks=binary_masks)
"""
def __init__(
self,
embedding: Text = "speechbrain/spkrec-ecapa-voxceleb",
device: torch.device = None,
use_auth_token: Union[Text, None] = None,
):
if not SPEECHBRAIN_IS_AVAILABLE:
raise ImportError(
f"'speechbrain' must be installed to use '{embedding}' embeddings. "
"Visit https://speechbrain.github.io for installation instructions."
)
super().__init__()
self.embedding = embedding
self.device = device
self.classifier_ = SpeechBrain_EncoderClassifier.from_hparams(
source=self.embedding,
savedir=f"{CACHE_DIR}/speechbrain",
run_opts={"device": self.device},
use_auth_token=use_auth_token,
)
@cached_property
def sample_rate(self) -> int:
return self.classifier_.audio_normalizer.sample_rate
@cached_property
def dimension(self) -> int:
dummy_waveforms = torch.rand(1, 16000).to(self.device)
*_, dimension = self.classifier_.encode_batch(dummy_waveforms).shape
return dimension
@cached_property
def metric(self) -> str:
return "cosine"
@cached_property
def min_num_samples(self) -> int:
lower, upper = 2, round(0.5 * self.sample_rate)
middle = (lower + upper) // 2
while lower + 1 < upper:
try:
_ = self.classifier_.encode_batch(
torch.randn(1, middle).to(self.device)
)
upper = middle
except RuntimeError:
lower = middle
middle = (lower + upper) // 2
return upper
def __call__(
self, waveforms: torch.Tensor, masks: torch.Tensor = None
) -> np.ndarray:
"""
Parameters
----------
waveforms : (batch_size, num_channels, num_samples)
Only num_channels == 1 is supported.
masks : (batch_size, num_samples), optional
Returns
-------
embeddings : (batch_size, dimension)
"""
batch_size, num_channels, num_samples = waveforms.shape
assert num_channels == 1
waveforms = waveforms.squeeze(dim=1)
if masks is None:
signals = waveforms.squeeze(dim=1)
wav_lens = signals.shape[1] * torch.ones(batch_size)
else:
batch_size_masks, _ = masks.shape
assert batch_size == batch_size_masks
# TODO: speed up the creation of "signals"
# preliminary profiling experiments show
# that it accounts for 15% of __call__
# (the remaining 85% being the actual forward pass)
imasks = F.interpolate(
masks.unsqueeze(dim=1), size=num_samples, mode="nearest"
).squeeze(dim=1)
imasks = imasks > 0.5
signals = pad_sequence(
[waveform[imask] for waveform, imask in zip(waveforms, imasks)],
batch_first=True,
)
wav_lens = imasks.sum(dim=1)
max_len = wav_lens.max()
# corner case: every signal is too short
if max_len < self.min_num_samples:
return np.NAN * np.zeros((batch_size, self.dimension))
too_short = wav_lens < self.min_num_samples
wav_lens = wav_lens / max_len
wav_lens[too_short] = 1.0
embeddings = (
self.classifier_.encode_batch(signals, wav_lens=wav_lens)
.squeeze(dim=1)
.cpu()
.numpy()
)
embeddings[too_short.cpu().numpy()] = np.NAN
return embeddings
class PyannoteAudioPretrainedSpeakerEmbedding:
"""Pretrained pyannote.audio speaker embedding
Parameters
----------
embedding : PipelineModel
pyannote.audio model
device : torch.device, optional
Device
use_auth_token : str, optional
When loading private huggingface.co models, set `use_auth_token`
        to True or to a string containing your huggingface.co authentication
token that can be obtained by running `huggingface-cli login`
Usage
-----
>>> get_embedding = PyannoteAudioPretrainedSpeakerEmbedding("pyannote/embedding")
>>> assert waveforms.ndim == 3
>>> batch_size, num_channels, num_samples = waveforms.shape
>>> assert num_channels == 1
>>> embeddings = get_embedding(waveforms)
>>> assert embeddings.ndim == 2
>>> assert embeddings.shape[0] == batch_size
>>> assert masks.ndim == 1
>>> assert masks.shape[0] == batch_size
>>> embeddings = get_embedding(waveforms, masks=masks)
"""
def __init__(
self,
embedding: PipelineModel = "pyannote/embedding",
device: torch.device = None,
use_auth_token: Union[Text, None] = None,
):
super().__init__()
self.embedding = embedding
self.device = device
self.model_: Model = get_model(self.embedding, use_auth_token=use_auth_token)
self.model_.eval()
self.model_.to(self.device)
@cached_property
def sample_rate(self) -> int:
return self.model_.audio.sample_rate
@cached_property
def dimension(self) -> int:
return self.model_.introspection.dimension
@cached_property
def metric(self) -> str:
return "cosine"
@cached_property
def min_num_samples(self) -> int:
return self.model_.introspection.min_num_samples
def __call__(
self, waveforms: torch.Tensor, masks: torch.Tensor = None
) -> np.ndarray:
with torch.no_grad():
if masks is None:
embeddings = self.model_(waveforms.to(self.device))
else:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
embeddings = self.model_(
waveforms.to(self.device), weights=masks.to(self.device)
)
return embeddings.cpu().numpy()
def PretrainedSpeakerEmbedding(
embedding: PipelineModel,
device: torch.device = None,
use_auth_token: Union[Text, None] = None,
):
"""Pretrained speaker embedding
Parameters
----------
embedding : Text
Can be a SpeechBrain (e.g. "speechbrain/spkrec-ecapa-voxceleb")
or a pyannote.audio model.
device : torch.device, optional
Device
use_auth_token : str, optional
When loading private huggingface.co models, set `use_auth_token`
        to True or to a string containing your huggingface.co authentication
token that can be obtained by running `huggingface-cli login`
Usage
-----
>>> get_embedding = PretrainedSpeakerEmbedding("pyannote/embedding")
>>> get_embedding = PretrainedSpeakerEmbedding("speechbrain/spkrec-ecapa-voxceleb")
>>> get_embedding = PretrainedSpeakerEmbedding("nvidia/speakerverification_en_titanet_large")
>>> assert waveforms.ndim == 3
>>> batch_size, num_channels, num_samples = waveforms.shape
>>> assert num_channels == 1
>>> embeddings = get_embedding(waveforms)
>>> assert embeddings.ndim == 2
>>> assert embeddings.shape[0] == batch_size
>>> assert masks.ndim == 1
>>> assert masks.shape[0] == batch_size
>>> embeddings = get_embedding(waveforms, masks=masks)
"""
if isinstance(embedding, str) and "speechbrain" in embedding:
return SpeechBrainPretrainedSpeakerEmbedding(
embedding, device=device, use_auth_token=use_auth_token
)
elif isinstance(embedding, str) and "nvidia" in embedding:
return NeMoPretrainedSpeakerEmbedding(embedding, device=device)
else:
return PyannoteAudioPretrainedSpeakerEmbedding(
embedding, device=device, use_auth_token=use_auth_token
)
class SpeakerEmbedding(Pipeline):
"""Speaker embedding pipeline
This pipeline assumes that each file contains exactly one speaker
and extracts one single embedding from the whole file.
Parameters
----------
embedding : Model, str, or dict, optional
Pretrained embedding model. Defaults to "pyannote/embedding".
See pyannote.audio.pipelines.utils.get_model for supported format.
segmentation : Model, str, or dict, optional
Pretrained segmentation (or voice activity detection) model.
See pyannote.audio.pipelines.utils.get_model for supported format.
Defaults to no voice activity detection.
use_auth_token : str, optional
When loading private huggingface.co models, set `use_auth_token`
        to True or to a string containing your huggingface.co authentication
token that can be obtained by running `huggingface-cli login`
Usage
-----
>>> from pyannote.audio.pipelines import SpeakerEmbedding
>>> pipeline = SpeakerEmbedding()
>>> emb1 = pipeline("speaker1.wav")
>>> emb2 = pipeline("speaker2.wav")
>>> from scipy.spatial.distance import cdist
>>> distance = cdist(emb1, emb2, metric="cosine")[0,0]
"""
def __init__(
self,
embedding: PipelineModel = "pyannote/embedding",
segmentation: PipelineModel = None,
use_auth_token: Union[Text, None] = None,
):
super().__init__()
self.embedding = embedding
self.segmentation = segmentation
self.embedding_model_: Model = get_model(
embedding, use_auth_token=use_auth_token
)
if self.segmentation is None:
models = [self.embedding_model_]
else:
segmentation_model: Model = get_model(
self.segmentation, use_auth_token=use_auth_token
)
models = [self.embedding_model_, segmentation_model]
# send models to GPU (when GPUs are available and model is not already on GPU)
cpu_models = [model for model in models if model.device.type == "cpu"]
for cpu_model, gpu_device in zip(
cpu_models, get_devices(needs=len(cpu_models))
):
cpu_model.to(gpu_device)
if self.segmentation is not None:
self.voice_activity_ = Inference(
segmentation_model,
pre_aggregation_hook=lambda scores: np.max(
scores, axis=-1, keepdims=True
),
)
def apply(self, file: AudioFile) -> np.ndarray:
device = self.embedding_model_.device
# read audio file and send it to GPU
waveform = self.embedding_model_.audio(file)[0][None].to(device)
if self.segmentation is None:
weights = None
else:
# obtain voice activity scores
weights = self.voice_activity_(file).data
# HACK -- this should be fixed upstream
weights[np.isnan(weights)] = 0.0
weights = torch.from_numpy(weights**3)[None, :, 0].to(device)
# extract speaker embedding on parts of
with torch.no_grad():
return self.embedding_model_(waveform, weights=weights).cpu().numpy()
def main(
protocol: str = "VoxCeleb.SpeakerVerification.VoxCeleb1",
subset: str = "test",
embedding: str = "pyannote/embedding",
segmentation: str = None,
):
import typer
from pyannote.database import FileFinder, get_protocol
from pyannote.metrics.binary_classification import det_curve
from scipy.spatial.distance import cdist
from tqdm import tqdm
pipeline = SpeakerEmbedding(embedding=embedding, segmentation=segmentation)
protocol = get_protocol(protocol, preprocessors={"audio": FileFinder()})
y_true, y_pred = [], []
emb = dict()
trials = getattr(protocol, f"{subset}_trial")()
for t, trial in enumerate(tqdm(trials)):
audio1 = trial["file1"]["audio"]
if audio1 not in emb:
emb[audio1] = pipeline(audio1)
audio2 = trial["file2"]["audio"]
if audio2 not in emb:
emb[audio2] = pipeline(audio2)
y_pred.append(cdist(emb[audio1], emb[audio2], metric="cosine")[0][0])
y_true.append(trial["reference"])
_, _, _, eer = det_curve(y_true, np.array(y_pred), distances=True)
typer.echo(
f"{protocol.name} | {subset} | {embedding} | {segmentation} | EER = {100 * eer:.3f}%"
)
if __name__ == "__main__":
import typer
typer.run(main)
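# Example invocation of the evaluation entry point above (module path assumed;
# requires the VoxCeleb protocol to be registered with pyannote.database):
#
#   python -m pyannote.audio.pipelines.speaker_verification \
#       --protocol VoxCeleb.SpeakerVerification.VoxCeleb1 \
#       --subset test \
#       --embedding pyannote/embedding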
| mit |
Neural-Network/TicTacToe | pybrain/datasets/sequential.py | 21 | 8789 | __author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de'
# $Id$
from scipy import ravel, r_
from random import sample
from pybrain.datasets.supervised import SupervisedDataSet
class EmptySequenceError(Exception): pass
class SequentialDataSet(SupervisedDataSet):
"""A SequentialDataSet is like a SupervisedDataSet except that it can keep
track of sequences of samples. Indices of a new sequence are stored whenever
the method newSequence() is called. The last (open) sequence is considered
a normal sequence even though it does not have a following "new sequence"
marker."""
def __init__(self, indim, targetdim):
SupervisedDataSet.__init__(self, indim, targetdim)
# add field that stores the beginning of a new episode
self.addField('sequence_index', 1)
self.append('sequence_index', 0)
self.currentSeq = 0
def newSequence(self):
"""Marks the beginning of a new sequence. this function does nothing if
called at the very start of the data set. Otherwise, it starts a new
sequence. Empty sequences are not allowed, and an EmptySequenceError
exception will be raised."""
length = self.getLength()
if length != 0:
if ravel(self.getField('sequence_index'))[-1] == length:
raise EmptySequenceError
self._appendUnlinked('sequence_index', length)
def _getSequenceField(self, index, field):
"""Return a sequence of one single field given by `field` and indexed by
`index`."""
seq = ravel(self.getField('sequence_index'))
if len(seq) == index + 1:
# user wants to access the last sequence, return until end of data
return self.getField(field)[seq[index]:]
if len(seq) < index + 1:
# sequence index beyond number of sequences. raise exception
raise IndexError('sequence does not exist.')
return self.getField(field)[seq[index]:seq[index + 1]]
def getSequence(self, index):
"""Returns the sequence given by `index`.
A list of arrays is returned for the linked arrays. It is assumed that
the last sequence goes until the end of the dataset."""
return [self._getSequenceField(index, l) for l in self.link]
def getSequenceIterator(self, index):
"""Return an iterator over the samples of the sequence specified by
`index`.
Each element is a tuple."""
return zip(*self.getSequence(index))
def endOfSequence(self, index):
"""Return True if the marker was moved over the last element of
sequence `index`, False otherwise.
Mostly used like .endOfData() with while loops."""
seq = ravel(self.getField('sequence_index'))
if len(seq) == index + 1:
# user wants to access the last sequence, return until end of data
return self.endOfData()
if len(seq) < index + 1:
# sequence index beyond number of sequences. raise exception
raise IndexError('sequence does not exist.')
else:
return self.index >= seq[index + 1]
def gotoSequence(self, index):
"""Move the internal marker to the beginning of sequence `index`."""
try:
self.index = ravel(self.getField('sequence_index'))[index]
except IndexError:
raise IndexError('sequence does not exist')
def getCurrentSequence(self):
"""Return the current sequence, according to the marker position."""
seq = ravel(self.getField('sequence_index'))
return len(seq) - sum(seq > self.index) - 1
def getNumSequences(self):
"""Return the number of sequences. The last (open) sequence is also
counted in, even though there is no additional 'newSequence' marker."""
return self.getField('sequence_index').shape[0]
def getSequenceLength(self, index):
"""Return the length of the given sequence. If `index` is pointing
to the last sequence, the sequence is considered to go until the end
of the dataset."""
seq = ravel(self.getField('sequence_index'))
if len(seq) == index + 1:
# user wants to access the last sequence, return until end of data
return int(self.getLength() - seq[index])
if len(seq) < index + 1:
# sequence index beyond number of sequences. raise exception
raise IndexError('sequence does not exist.')
return int(seq[index + 1] - seq[index])
def removeSequence(self, index):
"""Remove the `index`'th sequence from the dataset and places the
marker to the sample following the removed sequence."""
if index >= self.getNumSequences():
# sequence doesn't exist, raise exception
raise IndexError('sequence does not exist.')
sequences = ravel(self.getField('sequence_index'))
seqstart = sequences[index]
if index == self.getNumSequences() - 1:
# last sequence is going to be removed
lastSeqDeleted = True
seqend = self.getLength()
else:
lastSeqDeleted = False
# sequence to remove is not last one (sequence_index exists)
seqend = sequences[index + 1]
# cut out data from all fields
for label in self.link:
# concatenate rows from start to seqstart and from seqend to end
self.data[label] = r_[self.data[label][:seqstart, :], self.data[label][seqend:, :]]
# update endmarkers of linked fields
self.endmarker[label] -= seqend - seqstart
# update sequence indices
for i, val in enumerate(sequences):
if val > seqstart:
self.data['sequence_index'][i, :] -= seqend - seqstart
# remove sequence index of deleted sequence and reduce its endmarker
self.data['sequence_index'] = r_[self.data['sequence_index'][:index, :], self.data['sequence_index'][index + 1:, :]]
self.endmarker['sequence_index'] -= 1
if lastSeqDeleted:
# last sequence was removed
# move sequence marker to last remaining sequence
self.currentSeq = index - 1
# move sample marker to end of dataset
self.index = self.getLength()
# if there was only 1 sequence left, re-initialize sequence index
if self.getLength() == 0:
self.clear()
else:
# removed sequence was not last one (sequence_index exists)
# move sequence marker to the new sequence at position 'index'
self.currentSeq = index
# move sample marker to beginning of sequence at position 'index'
self.index = ravel(self.getField('sequence_index'))[index]
def clear(self):
SupervisedDataSet.clear(self, True)
self._appendUnlinked('sequence_index', [0])
self.currentSeq = 0
def __iter__(self):
"""Create an iterator object over sequences which are themselves
iterable objects."""
for i in range(self.getNumSequences()):
yield self.getSequenceIterator(i)
def _provideSequences(self):
"""Return an iterator over sequence lists."""
return iter(map(list, iter(self)))
def evaluateModuleMSE(self, module, averageOver=1, **args):
"""Evaluate the predictions of a module on a sequential dataset
and return the MSE (potentially average over a number of epochs)."""
res = 0.
for dummy in range(averageOver):
ponderation = 0.
totalError = 0
for seq in self._provideSequences():
module.reset()
e, p = self._evaluateSequence(module.activate, seq, **args)
totalError += e
ponderation += p
assert ponderation > 0
res += totalError / ponderation
return res / averageOver
def splitWithProportion(self, proportion=0.5):
"""Produce two new datasets, each containing a part of the sequences.
The first dataset will have a fraction given by `proportion` of the
dataset."""
l = self.getNumSequences()
leftIndices = sample(list(range(l)), int(l * proportion))
leftDs = self.copy()
leftDs.clear()
rightDs = leftDs.copy()
index = 0
for seq in iter(self):
if index in leftIndices:
leftDs.newSequence()
for sp in seq:
leftDs.addSample(*sp)
else:
rightDs.newSequence()
for sp in seq:
rightDs.addSample(*sp)
index += 1
return leftDs, rightDs
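# Minimal usage sketch (dimensions and samples chosen arbitrarily):
#
#   ds = SequentialDataSet(2, 1)
#   ds.newSequence()              # no-op on an empty dataset
#   ds.addSample((0, 0), (0,))
#   ds.addSample((0, 1), (1,))
#   ds.newSequence()              # marks the start of a second sequence
#   ds.addSample((1, 0), (1,))
#   ds.getNumSequences()          # -> 2
#   ds.getSequenceLength(0)       # -> 2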
| bsd-3-clause |
alex-ip/agdc | api-examples/source/main/python/workflow/observation_count.py | 1 | 6179 | #!/usr/bin/env python
# ===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ===============================================================================
__author__ = "Simon Oldfield"
import logging
import luigi
import numpy
import os
from gdalconst import GDT_Int16
from datacube.api.model import DatasetType, Pq25Bands
from datacube.api.utils import NDV, empty_array, PqaMask, get_dataset_data
from datacube.api.utils import get_dataset_metadata, raster_create
from datacube.api.workflow.cell import Workflow, SummaryTask, CellTask
_log = logging.getLogger()
class ObservationCountWorkflow(Workflow):
def __init__(self):
        Workflow.__init__(self, name="Observation Count Workflow")
def create_summary_tasks(self):
return [ObservationCountSummaryTask(x_min=self.x_min, x_max=self.x_max, y_min=self.y_min, y_max=self.y_max,
acq_min=self.acq_min, acq_max=self.acq_max, satellites=self.satellites,
output_directory=self.output_directory, csv=self.csv, dummy=self.dummy,
mask_pqa_apply=self.mask_pqa_apply, mask_pqa_mask=self.mask_pqa_mask)]
class ObservationCountSummaryTask(SummaryTask):
def create_cell_tasks(self, x, y):
return ObservationCountCellTask(x=x, y=y, acq_min=self.acq_min, acq_max=self.acq_max,
satellites=self.satellites,
output_directory=self.output_directory, csv=self.csv, dummy=self.dummy,
mask_pqa_apply=self.mask_pqa_apply, mask_pqa_mask=self.mask_pqa_mask)
class ObservationCountCellTask(CellTask):
def output(self):
from datacube.api.workflow import format_date
from datacube.api.utils import get_satellite_string
satellites = get_satellite_string(self.satellites)
acq_min = format_date(self.acq_min)
acq_max = format_date(self.acq_max)
filename = os.path.join(self.output_directory,
"{satellites}_OBSCOUNT_{x:03d}_{y:04d}_{acq_min}_{acq_max}.tif".format(
satellites=satellites,
x=self.x, y=self.y,
acq_min=acq_min,
acq_max=acq_max))
return luigi.LocalTarget(filename)
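    # Illustrative example of the naming scheme above (values are hypothetical and
    # assume get_satellite_string() returns e.g. "LS5_LS7" and format_date() returns
    # ISO dates): a cell at x=123, y=-45 would be written to something like
    # "LS5_LS7_OBSCOUNT_123_-045_2000-01-01_2005-12-31.tif" under output_directory.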
def run(self):
shape = (4000, 4000)
masks = [PqaMask.PQ_MASK_CLEAR,
PqaMask.PQ_MASK_SATURATION_OPTICAL,
PqaMask.PQ_MASK_SATURATION_THERMAL,
PqaMask.PQ_MASK_CONTIGUITY,
PqaMask.PQ_MASK_LAND,
PqaMask.PQ_MASK_CLOUD_ACCA,
PqaMask.PQ_MASK_CLOUD_FMASK,
PqaMask.PQ_MASK_CLOUD_SHADOW_ACCA,
PqaMask.PQ_MASK_CLOUD_SHADOW_FMASK]
observation_count = empty_array(shape=shape, dtype=numpy.int16, ndv=0)
observation_count_clear = dict()
for mask in masks:
observation_count_clear[mask] = empty_array(shape=shape, dtype=numpy.int16, ndv=0)
metadata = None
for tile in self.get_tiles():
# Get the PQA dataset
pqa = tile.datasets[DatasetType.PQ25]
data = get_dataset_data(pqa, [Pq25Bands.PQ])[Pq25Bands.PQ]
            #
            # Count any pixels that are not NDV - we don't expect any no-data pixels, but count them defensively
            #
            # Mask out any no-data pixels - there should be none, but just in case
            masked_data = numpy.ma.masked_equal(data, NDV)
            # Count the data pixels - i.e. pixels that were NOT masked out
            observation_count += numpy.where(numpy.ma.getmaskarray(masked_data), 0, 1)
#
            # Count the pixels that are not masked out by each pixel quality flag
#
for mask in masks:
# Apply the particular pixel mask
pqm = numpy.ma.masked_where(numpy.bitwise_and(data, mask) != mask, data)
# Count the pixels that were not masked out
observation_count_clear[mask] += numpy.where(pqm.mask, 0, 1)
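                # Toy illustration of the bit test above (hypothetical values): with
                # mask == 0b0100, a pixel whose PQA word is 0b1001 gives
                # bitwise_and(0b1001, 0b0100) == 0b0000 != mask and is masked out,
                # whereas a PQA word of 0b0101 gives 0b0100 == mask and is counted
                # as passing that particular quality test.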
if not metadata:
metadata = get_dataset_metadata(pqa)
# Create the output dataset
raster_create(self.output().path, [observation_count] + [observation_count_clear[mask] for mask in masks],
metadata.transform, metadata.projection, NDV, GDT_Int16)
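        # Note on the output layout (as built by the call above): band 1 holds the
        # total observation count and the remaining bands hold the per-flag clear
        # counts, in the same order as the ``masks`` list defined at the top of run().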
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
ObservationCountWorkflow().run() | bsd-3-clause |
xubenben/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 284 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labelings increases significantly as the number of clusters gets
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Hence, only adjusted measures can safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
    Both random labelings have the same number of clusters for each possible
    value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
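# Quick illustrative sanity check (an added sketch, not part of the original
# example; the demo_* names are introduced here only for this demonstration):
# for two independent random labelings, the adjusted Rand index hovers around 0
# while the unadjusted mutual information stays clearly positive.
demo_ari = uniform_labelings_scores(metrics.adjusted_rand_score,
                                    n_samples=50, n_clusters_range=[10], n_runs=3)
demo_mi = uniform_labelings_scores(metrics.mutual_info_score,
                                   n_samples=50, n_clusters_range=[10], n_runs=3)
print("Random labelings (k=10, n=50): ARI ~ %.3f, MI ~ %.3f"
      % (demo_ari.mean(), demo_mi.mean()))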
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |