repo_name | path | copies | size | content | license
---|---|---|---|---|---
benoitsteiner/tensorflow-xsmm | tensorflow/examples/get_started/regression/imports85.py | 39 | 6589 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A dataset loader for imports85.data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
pass
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data"
# Order is important for the csv-readers, so we use an OrderedDict here.
defaults = collections.OrderedDict([
("symboling", [0]),
("normalized-losses", [0.0]),
("make", [""]),
("fuel-type", [""]),
("aspiration", [""]),
("num-of-doors", [""]),
("body-style", [""]),
("drive-wheels", [""]),
("engine-location", [""]),
("wheel-base", [0.0]),
("length", [0.0]),
("width", [0.0]),
("height", [0.0]),
("curb-weight", [0.0]),
("engine-type", [""]),
("num-of-cylinders", [""]),
("engine-size", [0.0]),
("fuel-system", [""]),
("bore", [0.0]),
("stroke", [0.0]),
("compression-ratio", [0.0]),
("horsepower", [0.0]),
("peak-rpm", [0.0]),
("city-mpg", [0.0]),
("highway-mpg", [0.0]),
("price", [0.0])
]) # pyformat: disable
types = collections.OrderedDict((key, type(value[0]))
for key, value in defaults.items())
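# For example, the comprehension above yields `types` entries such as
# {"symboling": int, "normalized-losses": float, "make": str, ...},
# i.e. the Python type of each column's default value.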
def _get_imports85():
path = tf.contrib.keras.utils.get_file(URL.split("/")[-1], URL)
return path
def dataset(y_name="price", train_fraction=0.7):
"""Load the imports85 data as a (train,test) pair of `Dataset`.
Each dataset generates (features_dict, label) pairs.
Args:
y_name: The name of the column to use as the label.
train_fraction: A float, the fraction of data to use for training. The
remainder will be used for evaluation.
Returns:
A (train,test) pair of `Datasets`
"""
# Download and cache the data
path = _get_imports85()
# Define how the lines of the file should be parsed
def decode_line(line):
"""Convert a csv line into a (features_dict,label) pair."""
# Decode the line to a tuple of items based on the types of
# csv_header.values().
items = tf.decode_csv(line, list(defaults.values()))
# Convert the keys and items to a dict.
pairs = zip(defaults.keys(), items)
features_dict = dict(pairs)
# Remove the label from the features_dict
label = features_dict.pop(y_name)
return features_dict, label
def has_no_question_marks(line):
"""Returns True if the line of text has no question marks."""
# split the line into an array of characters
chars = tf.string_split(line[tf.newaxis], "").values
# for each character check if it is a question mark
is_question = tf.equal(chars, "?")
any_question = tf.reduce_any(is_question)
no_question = ~any_question
return no_question
def in_training_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# If you randomly split the dataset you won't get the same split in both
# sessions if you stop and restart training later. Also a simple
# random split won't work with a dataset that's too big to `.cache()` as
# we are doing here.
num_buckets = 1000000
bucket_id = tf.string_to_hash_bucket_fast(line, num_buckets)
# Use the hash bucket id as a random number that's deterministic per example
return bucket_id < int(train_fraction * num_buckets)
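  # A minimal illustration (not part of the original file) of why this split
  # is stable, assuming the TF 1.x API used above:
  #
  #   line = tf.constant("3,?,alfa-romero,gas,std,two,convertible")
  #   bucket = tf.string_to_hash_bucket_fast(line, 1000000)
  #   # `bucket` depends only on the line's bytes, so every run (and every
  #   # restart) assigns this line to the same side of the split.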
def in_test_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# Items not in the training set are in the test set.
# This line must use `~` instead of `not` because `not` only works on python
# booleans but we are dealing with symbolic tensors.
return ~in_training_set(line)
base_dataset = (
tf.data
# Get the lines from the file.
.TextLineDataset(path)
# drop lines with question marks.
.filter(has_no_question_marks))
train = (base_dataset
# Take only the training-set lines.
.filter(in_training_set)
# Decode each line into a (features_dict, label) pair.
.map(decode_line)
# Cache data so you only decode the file once.
.cache())
# Do the same for the test-set.
test = (base_dataset.filter(in_test_set).cache().map(decode_line))
return train, test
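# A hypothetical usage sketch (not in the original file), assuming the TF 1.x
# iterator API:
#
#   train, test = dataset()
#   iterator = train.shuffle(1000).batch(32).make_one_shot_iterator()
#   features, label = iterator.get_next()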
def raw_dataframe():
"""Load the imports85 data as a pd.DataFrame."""
# Download and cache the data
path = _get_imports85()
# Load it into a pandas dataframe
df = pd.read_csv(path, names=types.keys(), dtype=types, na_values="?")
return df
def load_data(y_name="price", train_fraction=0.7, seed=None):
"""Get the imports85 data set.
A description of the data is available at:
https://archive.ics.uci.edu/ml/datasets/automobile
The data itself can be found at:
https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data
Args:
y_name: the column to return as the label.
train_fraction: the fraction of the dataset to use for training.
seed: The random seed to use when shuffling the data. `None` generates a
unique shuffle every run.
Returns:
a pair of pairs where the first pair is the training data, and the second
is the test data:
    `(x_train, y_train), (x_test, y_test) = load_data(...)`
`x` contains a pandas DataFrame of features, while `y` contains the label
array.
"""
# Load the raw data columns.
data = raw_dataframe()
# Delete rows with unknowns
data = data.dropna()
# Shuffle the data
np.random.seed(seed)
# Split the data into train/test subsets.
x_train = data.sample(frac=train_fraction, random_state=seed)
x_test = data.drop(x_train.index)
# Extract the label from the features dataframe.
y_train = x_train.pop(y_name)
y_test = x_test.pop(y_name)
return (x_train, y_train), (x_test, y_test)
| apache-2.0 |
nightjean/Deep-Learning | tensorflow/contrib/data/python/kernel_tests/zip_dataset_op_test.py | 5 | 4387 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ZipDatasetTest(test.TestCase):
def testZipDataset(self):
component_placeholders = [
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtypes.float64)
]
datasets = [
dataset_ops.Dataset.from_tensor_slices(component_placeholder)
for component_placeholder in component_placeholders
]
zipped = dataset_ops.Dataset.zip(datasets)
iterator = zipped.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
equal_length_components = [
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
]
sess.run(init_op, feed_dict={ph: value for ph, value in zip(
component_placeholders, equal_length_components)})
for i in range(4):
results = sess.run(get_next)
for component, result_component in zip(
equal_length_components, results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
variable_length_components = [[1, 2, 3, 4], [1, 2, 3, 4, 5], [1.0, 2.0]]
sess.run(init_op, feed_dict={ph: value for ph, value in zip(
component_placeholders, variable_length_components)})
for i in range(2):
results = sess.run(get_next)
for component, result_component in zip(
variable_length_components, results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
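  # A minimal sketch (not part of the original test) of the Dataset.zip
  # semantics exercised above, using the same contrib API:
  #
  #   a = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
  #   b = dataset_ops.Dataset.from_tensor_slices([4.0, 5.0])
  #   zipped = dataset_ops.Dataset.zip((a, b))
  #   # yields (1, 4.0) then (2, 5.0); iteration ends with the shortest input,
  #   # which is why the variable-length case above runs out after two steps.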
def testNestedZipDataset(self):
component_placeholders = [
array_ops.placeholder(dtypes.int64, shape=[4, 20]),
array_ops.placeholder(dtypes.int64, shape=[4, 22]),
array_ops.placeholder(dtypes.float64, shape=[4])
]
datasets = [
dataset_ops.Dataset.from_tensor_slices(component_placeholder)
for component_placeholder in component_placeholders
]
zipped = dataset_ops.Dataset.zip((datasets[0], (datasets[1], datasets[2])))
iterator = zipped.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([20], get_next[0].shape)
self.assertEqual([22], get_next[1][0].shape)
self.assertEqual([], get_next[1][1].shape)
with self.test_session() as sess:
equal_length_components = [
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
]
sess.run(init_op, feed_dict={ph: value for ph, value in zip(
component_placeholders, equal_length_components)})
for i in range(4):
result1, (result2, result3) = sess.run(get_next)
self.assertAllEqual(equal_length_components[0][i], result1)
self.assertAllEqual(equal_length_components[1][i], result2)
self.assertAllEqual(equal_length_components[2][i], result3)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
| apache-2.0 |
mxjl620/scikit-learn | sklearn/utils/tests/test_validation.py | 79 | 18547 | """Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
NotFittedError,
has_fit_parameter,
check_is_fitted,
check_consistent_length,
DataConversionWarning,
)
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=(10, 10), dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M.ravel())
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
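# A small illustration (not part of the original tests) of the ordering
# pitfall noted above, in plain NumPy:
#
#   X = np.ones((10, 5)).T             # F-contiguous view
#   X.copy()                           # copy() defaults to order='C'
#   np.array(X, copy=True, order='K')  # 'K' preserves the existing layout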
@ignore_warnings
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
assert_warns(DeprecationWarning, check_array, [0, 1, 2])
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
    check_array(X_nan, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_pandas_dtype_object_conversion():
# test that data-frame like objects with dtype object
# get converted
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
X_df = MockDataFrame(X)
assert_equal(check_array(X_df).dtype.kind, "f")
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
# smoke-test against dataframes with column named "dtype"
X_df.dtype = "Hans"
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert_equal(check_array(X).dtype.kind, "i")
assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i")
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float64 = np.asarray(X_int_list, dtype=np.float64)
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float64 = sp.csr_matrix(X_float64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
y = [0, 0, 1]
integer_data = [X_int64, X_csc_int32]
float64_data = [X_float64, X_csr_float64]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
for X in integer_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_warns(DataConversionWarning, check_array, X,
dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
# Check that the warning message includes the name of the Estimator
X_checked = assert_warns_message(DataConversionWarning,
'SomeEstimator',
check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True,
warn_on_dtype=True,
estimator='SomeEstimator')
assert_equal(X_checked.dtype, np.float64)
X_checked, y_checked = assert_warns_message(
DataConversionWarning, 'KNeighborsClassifier',
check_X_y, X, y, dtype=np.float64, accept_sparse=True,
warn_on_dtype=True, estimator=KNeighborsClassifier())
assert_equal(X_checked.dtype, np.float64)
for X in float64_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=False)
assert_equal(X_checked.dtype, np.float64)
for X in float32_data:
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True)
assert_equal(X_checked.dtype, np.float32)
assert_true(X_checked is X)
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=True)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X)
X_checked = assert_no_warnings(check_array, X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=False)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X_csc_float32)
assert_equal(X_checked.format, 'csr')
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [[]])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
# But this works if the input data is forced to look like a 2 array with
# one sample and one feature:
X_checked = assert_warns(DeprecationWarning, check_array, [42],
ensure_2d=True)
assert_array_equal(np.array([[42]]), X_checked)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
# Simulate a case where a pipeline stage as trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
# Check is ValueError raised when non estimator instance passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
assert_raises_regexp(TypeError, 'got <\w+ \'int\'>',
check_consistent_length, [1, 2], 1)
assert_raises_regexp(TypeError, 'got <\w+ \'object\'>',
check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
| bsd-3-clause |
ageron/tensorflow | tensorflow/python/compiler/tensorrt/test/quantization_mnist_test.py | 3 | 11029 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to test TF-TRT INT8 conversion without calibration on Mnist model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import data
from tensorflow.python import keras
from tensorflow.python.compiler.tensorrt import trt_convert
from tensorflow.python.compiler.tensorrt.wrap_conversion import get_linked_tensorrt_version
from tensorflow.python.compiler.tensorrt.wrap_conversion import is_tensorrt_enabled
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.model_fn import EstimatorSpec
from tensorflow.python.estimator.model_fn import ModeKeys
from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.datasets import mnist
from tensorflow.python.layers import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import saver
from tensorflow.python.training.adam import AdamOptimizer
from tensorflow.python.training.checkpoint_management import latest_checkpoint
from tensorflow.python.training.training_util import get_global_step
INPUT_NODE_NAME = 'input'
OUTPUT_NODE_NAME = 'output'
class QuantizationAwareTrainingMNISTTest(test_util.TensorFlowTestCase):
def _BuildGraph(self, x):
def _Quantize(x, r):
x = gen_array_ops.quantize_and_dequantize_v2(x, -r, r)
return x
def _DenseLayer(x, num_inputs, num_outputs, quantization_range, name):
"""Dense layer with quantized outputs.
Args:
x: input to the dense layer
num_inputs: number of input columns of x
num_outputs: number of output columns
quantization_range: the min/max range for quantization
name: name of the variable scope
Returns:
The output of the layer.
"""
with variable_scope.variable_scope(name):
kernel = variable_scope.get_variable(
'kernel',
shape=[num_inputs, num_outputs],
dtype=dtypes.float32,
initializer=keras.initializers.glorot_uniform())
bias = variable_scope.get_variable(
'bias',
shape=[num_outputs],
dtype=dtypes.float32,
initializer=keras.initializers.zeros())
x = math_ops.matmul(x, kernel)
x = _Quantize(x, quantization_range)
x = nn.bias_add(x, bias)
x = _Quantize(x, quantization_range)
return x
x = _Quantize(x, 1)
# Conv + Bias + Relu6
x = layers.conv2d(x, filters=32, kernel_size=3, use_bias=True)
x = nn.relu6(x)
# Conv + Bias + Relu6
x = layers.conv2d(x, filters=64, kernel_size=3, use_bias=True)
x = nn.relu6(x)
# Reduce
x = math_ops.reduce_mean(x, [1, 2])
x = _Quantize(x, 6)
# FC1
x = _DenseLayer(x, 64, 512, 6, name='dense')
x = nn.relu6(x)
# FC2
x = _DenseLayer(x, 512, 10, 25, name='dense_1')
x = array_ops.identity(x, name=OUTPUT_NODE_NAME)
return x
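  # A brief note (not part of the original file) on the fake-quantization op
  # used by _Quantize above: quantize_and_dequantize_v2(x, -r, r) snaps each
  # value to the nearest level of an 8-bit grid over [-r, r] and returns it
  # as a float, so training already sees the rounding error that real INT8
  # inference introduces.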
def _GetGraphDef(self, use_trt, max_batch_size, model_dir):
"""Get the frozen mnist GraphDef.
Args:
use_trt: whether use TF-TRT to convert the graph.
max_batch_size: the max batch size to apply during TF-TRT conversion.
model_dir: the model directory to load the checkpoints.
Returns:
The frozen mnist GraphDef.
"""
graph = ops.Graph()
with self.session(graph=graph) as sess:
with graph.device('/GPU:0'):
x = array_ops.placeholder(
shape=(None, 28, 28, 1), dtype=dtypes.float32, name=INPUT_NODE_NAME)
self._BuildGraph(x)
# Load weights
mnist_saver = saver.Saver()
checkpoint_file = latest_checkpoint(model_dir)
mnist_saver.restore(sess, checkpoint_file)
# Freeze
graph_def = graph_util.convert_variables_to_constants(
sess, sess.graph_def, output_node_names=[OUTPUT_NODE_NAME])
# Convert with TF-TRT
if use_trt:
logging.info('Number of nodes before TF-TRT conversion: %d',
len(graph_def.node))
converter = trt_convert.TrtGraphConverter(
input_graph_def=graph_def,
nodes_blacklist=[OUTPUT_NODE_NAME],
max_batch_size=max_batch_size,
precision_mode='INT8',
# There is a 2GB GPU memory limit for each test, so we set
# max_workspace_size_bytes to 256MB to leave enough room for TF
# runtime to allocate GPU memory.
max_workspace_size_bytes=1 << 28,
minimum_segment_size=2,
use_calibration=False,
use_function_backup=False)
graph_def = converter.convert()
logging.info('Number of nodes after TF-TRT conversion: %d',
len(graph_def.node))
num_engines = len(
[1 for n in graph_def.node if str(n.op) == 'TRTEngineOp'])
self.assertEqual(1, num_engines)
return graph_def
def _Run(self, is_training, use_trt, batch_size, num_epochs, model_dir):
"""Train or evaluate the model.
Args:
is_training: whether to train or evaluate the model. In training mode,
quantization will be simulated where the quantize_and_dequantize_v2 are
placed.
use_trt: if true, use TRT INT8 mode for evaluation, which will perform
real quantization. Otherwise use native TensorFlow which will perform
simulated quantization. Ignored if is_training is True.
batch_size: batch size.
num_epochs: how many epochs to train. Ignored if is_training is False.
model_dir: where to save or load checkpoint.
Returns:
The Estimator evaluation result.
"""
# Get dataset
train_data, test_data = mnist.load_data()
def _PreprocessFn(x, y):
x = math_ops.cast(x, dtypes.float32)
x = array_ops.expand_dims(x, axis=2)
x = 2.0 * (x / 255.0) - 1.0
y = math_ops.cast(y, dtypes.int32)
return x, y
def _EvalInputFn():
mnist_x, mnist_y = test_data
dataset = data.Dataset.from_tensor_slices((mnist_x, mnist_y))
dataset = dataset.apply(
data.experimental.map_and_batch(
map_func=_PreprocessFn,
batch_size=batch_size,
num_parallel_calls=8))
dataset = dataset.repeat(count=1)
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
def _TrainInputFn():
mnist_x, mnist_y = train_data
dataset = data.Dataset.from_tensor_slices((mnist_x, mnist_y))
dataset = dataset.shuffle(2 * len(mnist_x))
dataset = dataset.apply(
data.experimental.map_and_batch(
map_func=_PreprocessFn,
batch_size=batch_size,
num_parallel_calls=8))
dataset = dataset.repeat(count=num_epochs)
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
def _ModelFn(features, labels, mode):
if is_training:
logits_out = self._BuildGraph(features)
else:
graph_def = self._GetGraphDef(use_trt, batch_size, model_dir)
logits_out = importer.import_graph_def(
graph_def,
input_map={INPUT_NODE_NAME: features},
return_elements=[OUTPUT_NODE_NAME + ':0'],
name='')[0]
loss = losses.sparse_softmax_cross_entropy(
labels=labels, logits=logits_out)
summary.scalar('loss', loss)
classes_out = math_ops.argmax(logits_out, axis=1, name='classes_out')
accuracy = metrics.accuracy(
labels=labels, predictions=classes_out, name='acc_op')
summary.scalar('accuracy', accuracy[1])
if mode == ModeKeys.EVAL:
return EstimatorSpec(
mode, loss=loss, eval_metric_ops={'accuracy': accuracy})
elif mode == ModeKeys.TRAIN:
optimizer = AdamOptimizer(learning_rate=1e-2)
train_op = optimizer.minimize(loss, global_step=get_global_step())
return EstimatorSpec(mode, loss=loss, train_op=train_op)
config_proto = config_pb2.ConfigProto()
config_proto.gpu_options.allow_growth = True
estimator = Estimator(
model_fn=_ModelFn,
model_dir=model_dir if is_training else None,
config=RunConfig(session_config=config_proto))
if is_training:
estimator.train(_TrainInputFn)
results = estimator.evaluate(_EvalInputFn)
logging.info('accuracy: %s', str(results['accuracy']))
return results
# To generate the checkpoint, set a different model_dir and call self._Run()
  # by setting is_training=True and num_epochs=100, e.g.:
# model_dir = '/tmp/quantization_mnist'
# self._Run(
# is_training=True,
# use_trt=False,
# batch_size=128,
# num_epochs=100,
# model_dir=model_dir)
def testEval(self):
if not is_tensorrt_enabled():
return
model_dir = test.test_src_dir_path('python/compiler/tensorrt/test/testdata')
accuracy_tf_native = self._Run(
is_training=False,
use_trt=False,
batch_size=128,
num_epochs=None,
model_dir=model_dir)['accuracy']
logging.info('accuracy_tf_native: %f', accuracy_tf_native)
self.assertAllClose(0.9662, accuracy_tf_native, rtol=3e-3, atol=3e-3)
if get_linked_tensorrt_version()[0] < 5:
return
accuracy_tf_trt = self._Run(
is_training=False,
use_trt=True,
batch_size=128,
num_epochs=None,
model_dir=model_dir)['accuracy']
logging.info('accuracy_tf_trt: %f', accuracy_tf_trt)
self.assertAllClose(0.9675, accuracy_tf_trt, rtol=1e-3, atol=1e-3)
if __name__ == '__main__':
test.main()
| apache-2.0 |
nickgentoo/scikit-learn-graph | skgraph/kernel/ODDSTGraphKernel.py | 1 | 28608 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 24 12:44:02 2015
Copyright 2015 Nicolo' Navarin, Riccardo Tesselli
This file is part of scikit-learn-graph.
scikit-learn-graph is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
scikit-learn-graph is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with scikit-learn-graph. If not, see <http://www.gnu.org/licenses/>.
"""
from graphKernel import GraphKernel
from ..graph.GraphTools import generateDAG
from ..graph.GraphTools import generateDAGOrdered
from ..graph.GraphTools import orderDAGvertices
from operator import itemgetter
from ..graph.GraphTools import drawGraph
from KernelTools import convert_to_sparse_matrix
from sklearn.preprocessing import normalize
import networkx as nx
import math
import sys
import numpy as np
class ODDSTGraphKernel(GraphKernel):
"""
Class that implements the ODDKernel with ST kernel
"""
class UniqueMap(object):
"""
        Inner class that creates a map between elements and ascending unique values
"""
def __init__(self):
self.__counter=0
self.__map={}
def addElement(self,elem):
if self.__map.get(elem) is None:
self.__map[elem]=self.__counter
self.__counter+=1
def getElement(self,elem):
return self.__map.get(elem)
def __init__(self, r =3, l =1, normalization =True, ntype =0):
"""
Constructor
@type r: integer number
@param r: ODDKernel Parameter
@type l: number in (0,1]
@param l: ODDKernel Parameter
@type normalization: boolean
@param normalization: True to normalize the feature vectors
@type ntype: enum in [0,1]
@param ntype: 0 for default normalization, 1 for tanh normalization
@type show: boolean
@param show: If true shows graphs and DAGs during computation
"""
self.Lambda=l
self.max_radius=r
self.normalization=normalization
self.normalization_type = ntype
self.__startsymbol='!' #special symbols used in encoding
self.__conjsymbol='#'
self.__endsymbol='?'
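    # An illustrative example (not in the original file) of this encoding:
    # a root labelled 'A' with children 'B' and 'C' is written 'A!B#C?',
    # where '!' opens the child list, '#' separates siblings and '?' closes
    # it; encodingWithLabelsToDag below parses exactly this grammar.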
def computeWeightedGraphFromFeatures(self,G,features):
"""
#TODO
"""
featureindexes=self.getFeaturesIndexes(G)
wg=nx.Graph(G)
for u in wg.nodes():
wg.node[u]['weight']=0
for f in featureindexes.keys():
if not features.get(f) is None:
value=features.get(f)
DAG=self.encodingWithIndexesToDag(featureindexes[f])[0]
for u in DAG.nodes():
wg.node[u]['weight']+=value
return wg
def computeWeightedGraph(self,G):
"""
Public method that weights a graph G given its features in the encoding with nodes' indexes instead of labels
@type G: a networkx graph
@param G: the graph to weight
@rtype: networkx graph
@return: the weighted graph
"""
dfeatures=self.getFeaturesNoCollisions(G, indexes=True)
wg=nx.Graph(G)
pairs=[]
for (k,v) in dfeatures.items():
pairs.append((self.encodingWithIndexesToDag(k)[0],v))
for (dag,frequency) in pairs:
for u in dag.nodes():
if wg.node[u].get('weight') is None:
wg.node[u]['weight']=frequency
else:
wg.node[u]['weight']+=frequency
return wg
def encodingWithLabelsToDag(self, encode, rootindex=0):
"""
Public method that creates a DAG given its encoding with the nodes' labels
@type encode: string
@param encode: the DAG's encoding
@rtype: triple (networkx.DiGraph,string,string)
@return: the triple that contains the created DAG, the left string to parse and the root's index
"""
DAG=nx.DiGraph()
start=encode.find(self.__startsymbol)
if start==-1:
start=sys.maxint
end=encode.find(self.__endsymbol)
if end==-1:
end=sys.maxint
conj=encode.find(self.__conjsymbol)
if conj==-1:
conj=sys.maxint
minindex=min(start,end,conj)
labelnode=encode[:minindex]
DAG.add_node(rootindex, label=labelnode)
encode=encode[minindex:]
if len(encode)!=0:
if encode[0]==self.__startsymbol:
encode=encode[1:]
(childDAG,encodeleft,root,indexnext)=self.encodingWithLabelsToDag(encode,rootindex+1)
childrenDAG=[childDAG]
childrenRoot=[root]
while(encodeleft[0]==self.__conjsymbol):
encodeleft=encodeleft[1:]
(childDAG,encodeleft,root,indexnext)=self.encodingWithLabelsToDag(encodeleft,indexnext)
childrenDAG.append(childDAG)
childrenRoot.append(root)
if encodeleft[0]==self.__endsymbol:
encodeleft=encodeleft[1:]
compose=nx.DiGraph(DAG)
for g in childrenDAG:
compose=nx.compose(compose,g)
for r in childrenRoot:
compose.add_edge(rootindex, r)
return (compose,encodeleft,rootindex,indexnext)
else:
return (DAG,encode,rootindex,rootindex+1)
else:
return (DAG,encode,rootindex,rootindex+1)
def encodingWithIndexesToDag(self, encode):
"""
Public method that creates a DAG given its encoding with the nodes' index
@type encode: string
@param encode: the DAG's encoding
@rtype: triple (networkx.DiGraph,string,string)
@return: the triple that contains the created DAG, the left string to parse and the root's index
"""
DAG=nx.DiGraph()
start=encode.find(self.__startsymbol)
if start==-1:
start=sys.maxint
end=encode.find(self.__endsymbol)
if end==-1:
end=sys.maxint
conj=encode.find(self.__conjsymbol)
if conj==-1:
conj=sys.maxint
minindex=min(start,end,conj)
number=int(encode[:minindex])
DAG.add_node(number)
encode=encode[minindex:]
if len(encode)!=0:
if encode[0]==self.__startsymbol:
encode=encode[1:]
(childDAG,encodeleft,root)=self.encodingWithIndexesToDag(encode)
childrenDAG=[childDAG]
childrenRoot=[root]
while(encodeleft[0]==self.__conjsymbol):
encodeleft=encodeleft[1:]
(childDAG,encodeleft,root)=self.encodingWithIndexesToDag(encodeleft)
childrenDAG.append(childDAG)
childrenRoot.append(root)
if encodeleft[0]==self.__endsymbol:
encodeleft=encodeleft[1:]
compose=nx.DiGraph(DAG)
for g in childrenDAG:
compose=nx.compose(compose,g)
for r in childrenRoot:
compose.add_edge(number, r)
return (compose,encodeleft,number)
else:
return (DAG,encode,number)
else:
return (DAG,encode,number)
def __normalization(self, feature_list):
"""
Private method that normalize the feature vector if requested
@type feature_list: Dictionary
@param feature_list: Dictionary that represent the feature vector
@rtype: Dictionary
@return: The normalized feature vector
"""
if self.normalization:
total_norm = 0.0
for value in feature_list.itervalues():
total_norm += value*value
normalized_feature_vector = {}
sqrt_total_norm = math.sqrt( float(total_norm) )
for (key,value) in feature_list.iteritems():
normalized_feature_vector[key] = value/sqrt_total_norm
return normalized_feature_vector
else :
return dict(feature_list)
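    # For example (illustrative, not in the original file): the feature vector
    # {f1: 3.0, f2: 4.0} has norm sqrt(3**2 + 4**2) = 5.0, so it normalizes
    # to {f1: 0.6, f2: 0.8}.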
def computeKernelMatrixTrain(self,Graphs):
return self.computeGram(Graphs)
def computeGram(self, g_it, jobs=-1, approx=True, precomputed=None):
"""
Public static method to compute the Gram matrix
@type g_it: networkx graph list
@param g_it: graph instances
@type jobs: integer
@param jobs: number of parallel jobs. if -1 then number of jobs is maximum
@type approx: boolean
@param approx: if True then the approximated decomposition is used
@type precomputed: csr_sparse matrix
@param precomputed: precomputed instance-features matrix
@rtype: numpy matrix
@return: the Gram matrix
"""
if precomputed is None:
precomputed=self.transform(g_it, n_jobs=jobs, approximated=approx)
return precomputed.dot(precomputed.T).todense().tolist()
def computeGramTest(self,X,Y,jobs=-1, approx=True, precomputed=None):
"""
TODO TEST
Public static method to compute the Gram matrix
        @type X: networkx graph list
        @param X: the first set of graph instances
        @type Y: networkx graph list
        @param Y: the second set of graph instances
        @rtype: numpy matrix
        @return: the Gram matrix between X and Y
"""
if precomputed is None:
precomputed1=self.transform(X, n_jobs=jobs, approximated=approx)
            precomputed2=self.transform(Y, n_jobs=jobs, approximated=approx)
return precomputed1.dot(precomputed2.T).todense().tolist()
def __transform(self, instance_id , G_orig, approximated=True, MapEncToId=None):
"""
Private method that given a graph id and its representation computes the normalized feature vector
@type instance_id: integer number
@param instannce_id: a graph id
@type G_orig: Networkx graph
@param G_orig: a Networkx graph
@type MapEncToId: self.UniqueMap
@param MapEncToId: Map between feature's encodings and integer values
@rtype: Dictionary
@return: The normalized feature vector
"""
#feature_list = defaultdict(lambda : defaultdict(float))
feature_list={}
if approximated:
feature_list.update({(instance_id,k):v for (k,v) in self.getFeaturesApproximated(G_orig,MapEncToId).items()})
else:
feature_list.update({(instance_id,k):v for (k,v) in self.getFeaturesNoCollisions(G_orig,MapEncToId).items()})
# ve=convert_to_sparse_matrix(feature_list)
# if self.normalization:
# ve = normalize(ve, norm='l2', axis=1)
# return ve
# return self.__normalization(feature_list)
return feature_list
def __transform_serial(self, G_list, approximated=True,keepdictionary=False):
"""
Private method that converts a networkx graph list into a instance-features matrix
@type G_list: networkx graph generator
@param G_list: list of the graph to convert
@type approximated: boolean
@param approximated: true if use a hash function with probable collisions during feature decomposition. False no collision guaranteed
@type keepdictionary: boolean
@param keepdictionary: True if the instance-feature matrix is kept as a dictionary. Else is a csr_matrix
@rtype: scipy.sparse.csr_matrix
@return: the instance-features matrix
"""
feature_dict={}
MapEncToId=None
if not keepdictionary:
MapEncToId=self.UniqueMap()
for instance_id , G in enumerate( G_list ):
feature_dict.update(self.__transform( instance_id, G, approximated, MapEncToId))
# if keepdictionary:
# return (convert_to_sparse_matrix( feature_dict, MapEncToId ),feature_dict)
# else:
# return convert_to_sparse_matrix( feature_dict, MapEncToId )
ve=convert_to_sparse_matrix(feature_dict)
if self.normalization:
ve = normalize(ve, norm='l2', axis=1)
return ve
def transform(self, G_list, n_jobs = -1, approximated=True, keepdictionary=False):
"""
Public method that given a list of networkx graph it creates the sparse matrix (example, features) in parallel or serial
@type G_list: networkx graph generator
@param G_list: list of the graph to convert
@type n_jobs: integer number
@param n_jobs: number of parallel jobs
        @type approximated: boolean
        @param approximated: true if use a hash function with probable collisions during feature decomposition. False no collision guaranteed
@type keepdictionary: boolean
@param keepdictionary: True if the instance-feature matrix is kept as a dictionary. Else is a csr_matrix
@rtype: scipy.sparse.csr_matrix
@return: the instance-features matrix
"""
        if n_jobs == 1 or n_jobs == -1:
return self.__transform_serial(G_list,approximated,keepdictionary)
else:
print "WARNING: parallel calculation not implemented"
return self.__transform_serial(G_list,approximated,keepdictionary)
def getFeaturesIndexes(self,G):
"""
Public method that given a networkx graph G will create the dictionary representing its encoded features and the corresponding index nodes
@type G: networkx graph
@param G: the graph to extract features from
@rtype: dictionary
@return: the encoding-feature dictionary
"""
Dict_features={}
for v in G.nodes():
if G.node[v]['viewpoint']:
if not G.graph['ordered']:
(DAG,maxLevel)=generateDAG(G, v, self.max_radius)
orderDAGvertices(DAG)
else:
(DAG,maxLevel)=generateDAGOrdered(G, v, self.max_radius)
MapNodeToProductionsID={}
MapNodeToProductionsIDInd={}
for u in DAG.nodes():
MapNodeToProductionsID[u]=[]
MapNodeToProductionsIDInd[u]=[]
for u in nx.topological_sort(DAG)[::-1]:
max_child_height=0
for child in DAG.successors(u):
child_height=len(MapNodeToProductionsID.get(child))
if child_height > max_child_height:
max_child_height = child_height
for depth in range(max_child_height+1):
if depth==0:
enc=DAG.node[u]['label']
encind=str(u)
MapNodeToProductionsID[u].append(enc)
MapNodeToProductionsIDInd[u].append(encind)
if Dict_features.get(enc) is None:
Dict_features[enc]=encind
else:
encoding=DAG.node[u]['label']
encodingind=str(u)
vertex_label_id_list=[]
for child in DAG.successors(u):
size_map=len(MapNodeToProductionsID[child])
child_hash=MapNodeToProductionsID[child][min(size_map,depth)-1]
child_hashind=MapNodeToProductionsIDInd[child][min(size_map,depth)-1]
vertex_label_id_list.append((child_hash,child_hashind))
vertex_label_id_list.sort(key=itemgetter(0))
encoding+=self.__startsymbol+vertex_label_id_list[0][0]
encodingind+=self.__startsymbol+vertex_label_id_list[0][1]
for i in range(1,len(vertex_label_id_list)):
encoding+=self.__conjsymbol+vertex_label_id_list[i][0]
encodingind+=self.__conjsymbol+vertex_label_id_list[i][1]
encoding+=self.__endsymbol
encodingind+=self.__endsymbol
MapNodeToProductionsID[u].append(encoding)
MapNodeToProductionsIDInd[u].append(encodingind)
if Dict_features.get(encoding) is None:
Dict_features[encoding]=encodingind
return Dict_features
def getFeaturesNoCollisions(self,G,MapEncToId=None,indexes=False):
"""
Public method that given a networkx graph G will create the dictionary representing its features according to the ST Kernel
@type G: networkx graph
@param G: the graph to extract features from
@type MapEncToId: self.UniqueMap
@param MapEncToId: Map between feature's encodings and integer values
@type indexes: boolean
@param indexes: if True the feature is encoded using the nodes' index rather than theirs labels
@rtype: dictionary
@return: the encoding-feature dictionary
"""
Dict_features={}
for v in G.nodes():
(DAG,maxLevel)=generateDAG(G, v, self.max_radius)
MapNodeToProductionsID={} #k:list(unsigned)
MapNodetoFrequencies={} #k:list(int)
for u in DAG.nodes():
MapNodeToProductionsID[u]=[]
MapNodetoFrequencies[u]=[]
MapProductionIDtoSize={} #k:int
for u in nx.topological_sort(DAG)[::-1]:
max_child_height=0
for child in DAG.successors(u):
                child_height=len(MapNodeToProductionsID.get(child))
if child_height > max_child_height:
max_child_height = child_height
for depth in range(max_child_height+1):
if depth==0:
if not indexes:
enc=DAG.node[u]['label']
else:
enc=str(u)
MapNodeToProductionsID[u].append(enc)
frequency=0
if max_child_height==0:
frequency=maxLevel - DAG.node[u]['depth']
if Dict_features.get(enc) is None:
Dict_features[enc]=float(frequency+1.0)*math.sqrt(self.Lambda)
else:
Dict_features[enc]+=float(frequency+1.0)*math.sqrt(self.Lambda)
if not MapEncToId is None:
MapEncToId.addElement(enc)
MapNodetoFrequencies[u].append(frequency)
MapProductionIDtoSize[enc]=1
else:
size=0
if not indexes:
encoding=DAG.node[u]['label']
else:
encoding=str(u)
vertex_label_id_list=[]#list[string]
min_freq_children=sys.maxint
for child in DAG.successors(u):
size_map=len(MapNodeToProductionsID[child])
child_hash=MapNodeToProductionsID[child][min(size_map,depth)-1]
freq_child=MapNodetoFrequencies[child][min(size_map,depth)-1]
if freq_child<min_freq_children:
min_freq_children=freq_child
vertex_label_id_list.append(child_hash)
size+=MapProductionIDtoSize[child_hash]
vertex_label_id_list.sort()
encoding+=self.__startsymbol+vertex_label_id_list[0]
for i in range(1,len(vertex_label_id_list)):
encoding+=self.__conjsymbol+vertex_label_id_list[i]
encoding+=self.__endsymbol
MapNodeToProductionsID[u].append(encoding)
size+=1
MapProductionIDtoSize[encoding]=size
frequency = min_freq_children
MapNodetoFrequencies[u].append(frequency)
if Dict_features.get(encoding) is None:
Dict_features[encoding]=float(frequency+1.0)*math.sqrt(math.pow(self.Lambda,size))
else:
Dict_features[encoding]+=float(frequency+1.0)*math.sqrt(math.pow(self.Lambda,size))
if not MapEncToId is None:
MapEncToId.addElement(encoding)
return Dict_features
    def getFeaturesApproximated(self,G,MapEncToId=None): #TODO: use the xxhash lib with a settable bit size
"""
Public method that given a networkx graph G will create the dictionary representing its features according to the ST Kernel.
The computation will use a hash function to encode a feature. There might be collisions
@type G: networkx graph
@param G: the graph to extract features from
@type hashsize: integer
@param hashsize: number of bits of the hash function to use
@type MapEncToId: self.UniqueMap
@param MapEncToId: Map between feature's encodings and integer values
@rtype: dictionary
@return: the encoding-feature dictionary
"""
Dict_features={}
for v in G.nodes():
if G.node[v]['viewpoint']:
if not G.graph['ordered']:
(DAG,maxLevel)=generateDAG(G, v, self.max_radius)
#orderDAGvertices(DAG)
else:
(DAG,maxLevel)=generateDAGOrdered(G, v, self.max_radius)
MapNodeToProductionsID={} #k:list(unsigned)
MapNodetoFrequencies={} #k:list(int)
for u in DAG.nodes():
MapNodeToProductionsID[u]=[]
MapNodetoFrequencies[u]=[]
MapProductionIDtoSize={} #k:int
for u in nx.topological_sort(DAG)[::-1]:
max_child_height=0
for child in DAG.successors(u):
child_height=len(MapNodeToProductionsID.get(child))
if child_height > max_child_height:
max_child_height = child_height
for depth in xrange(max_child_height+1):
if depth==0:
enc=hash(str(DAG.node[u]['label']))
MapNodeToProductionsID[u].append(enc)
frequency=0
if max_child_height==0:
frequency=maxLevel - DAG.node[u]['depth']
weight = float(frequency+1.0)*math.sqrt(self.Lambda)
if self.normalization and self.normalization_type == 1:
weight = math.tanh(float(frequency+1.0))*math.tanh(math.sqrt(self.Lambda))
if Dict_features.get(enc) is None:
Dict_features[enc] = weight
else:
Dict_features[enc] += weight
if not MapEncToId is None:
MapEncToId.addElement(enc)
MapNodetoFrequencies[u].append(frequency)
MapProductionIDtoSize[enc]=1
else:
size=0
encoding=str(DAG.node[u]['label'])
vertex_label_id_list=[]#list[string]
min_freq_children=sys.maxint
for child in DAG.successors(u):
size_map=len(MapNodeToProductionsID[child])
child_hash=MapNodeToProductionsID[child][min(size_map,depth)-1]
freq_child=MapNodetoFrequencies[child][min(size_map,depth)-1]
if freq_child<min_freq_children:
min_freq_children=freq_child
vertex_label_id_list.append(child_hash)
size+=MapProductionIDtoSize[child_hash]
vertex_label_id_list.sort()
encoding+=self.__startsymbol+str(vertex_label_id_list[0])
for i in range(1,len(vertex_label_id_list)):
encoding+=self.__conjsymbol+str(vertex_label_id_list[i])
encoding+=self.__endsymbol
encoding=hash(encoding)
MapNodeToProductionsID[u].append(encoding)
size+=1
MapProductionIDtoSize[encoding]=size
frequency = min_freq_children
MapNodetoFrequencies[u].append(frequency)
weight = float(frequency+1.0)*math.sqrt(math.pow(self.Lambda,size))
if self.normalization and self.normalization_type == 1:
weight = math.tanh(float(frequency+1.0))*math.tanh(math.sqrt(math.pow(self.Lambda,size)))
if Dict_features.get(encoding) is None:
Dict_features[encoding] = weight
else:
Dict_features[encoding] += weight
if not MapEncToId is None:
MapEncToId.addElement(encoding)
return Dict_features
def kernelFunction(self,Graph1, Graph2):
"""
#TODO
"""
G_list = [Graph1, Graph2]
X=self.transform(G_list, n_jobs=1)
row1=X[0]
row2=X[1]
return row1.dot(row2.T)[0,0]
| gpl-3.0 |
rbrecheisen/arff-utils | arff_utils/arff_utils.py | 1 | 17913 | # -*- coding: utf-8 -*-
__author__ = 'Ralph'
import arff
import numpy as np
import pandas as pd
class ARFF(object):
@staticmethod
def read(file_name, missing=None):
"""
Loads ARFF file into data dictionary. Missing values indicated
by '?' are automatically converted to None. If you want some
other value to be treated as missing, specify them in the
missing parameter.
:param file_name: File name
:param missing: List of missing value representations
:return: Data dictionary
"""
data = arff.load(open(file_name))
if missing is not None:
for i in range(len(data['data'])):
for j in range(len(data['data'][i])):
if type(missing) is str:
if data['data'][i][j] == missing:
data['data'][i][j] = None
elif type(missing) is list:
for m in missing:
if data['data'][i][j] == m:
data['data'][i][j] = None
else:
raise RuntimeError('Invalid type for \'missing\' parameter ' + str(type(missing)))
return data
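    # A hypothetical usage sketch (not in the original file):
    #
    #   data = ARFF.read('features.arff', missing=['NA', '-999'])
    #   # '?' cells are already None after arff.load; 'NA' and '-999' cells
    #   # are additionally converted to None by the loop above.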
@staticmethod
def read_from_csv(file_name):
"""
Loads CSV file and converts it to an ARFF data dictionary. This
function assumes the following:
(1) First line contains a header with column names
(2) First column contains IDs (interpreted as string values)
(3) Remaining columns contain numeric values
:param file_name: CSV file path
"""
attributes = []
f = open(file_name, 'r')
header = f.readline().strip().split(',')
header = [item.strip() for item in header]
attributes.append((header[0], 'STRING'))
for item in header[1:]:
attributes.append((item, 'NUMERIC'))
data = []
for line in f.readlines():
line = line.strip()
if line.startswith('#') or line == '':
continue
parts = line.split(',')
parts = [part.strip() for part in parts]
parts[0] = str(parts[0])
parts[1:] = [float(part) for part in parts[1:]]
data.append(parts)
f.close()
return {
'relation': 'unknown',
'attributes': attributes,
'data': data,
'description': ''
}
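    # For example (illustrative), a file containing
    #
    #   id,f1,f2
    #   s1,0.5,1.2
    #
    # yields attributes [('id', 'STRING'), ('f1', 'NUMERIC'), ('f2', 'NUMERIC')]
    # and data [['s1', 0.5, 1.2]].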
@staticmethod
def to_data_frame(data, index_col=None):
"""
Converts ARFF data dictionary to Pandas data frame.
:param data: Data dictionary
:param index_col: Column name to use as index
:return: Data frame
"""
# Create data frame by by taking rows and attributes from
# ARFF data. Data types should be automatically inferred
rows = data['data']
columns = [attribute[0] for attribute in data['attributes']]
# Get categorical-type columns
categoricals = []
for attribute in data['attributes']:
column = attribute[0]
if type(attribute[1]) is list:
categoricals.append(column)
# Create data frame from ARFF dictionary
data_frame = pd.DataFrame(rows, columns=columns)
for categorical in categoricals:
data_frame[categorical] = data_frame[categorical].astype('category')
# If index column specified, set it
if index_col is not None:
if index_col not in data_frame.columns:
raise RuntimeError('Index column ' + index_col + ' not found')
data_frame.set_index(index_col, drop=True, inplace=True, verify_integrity=True)
return data_frame
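    # Example (sketch; the file path and the 'id' column are hypothetical):
    #   df = ARFF.to_data_frame(ARFF.read('features.arff'), index_col='id')
    #   df.describe()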
    # Python has no method overloading: this variant, which takes explicit
    # ARFF attribute definitions, needs a name distinct from the
    # attribute-inferring `from_data_frame` defined below, which would
    # otherwise silently shadow it.
    @staticmethod
    def from_data_frame_with_attributes(relation, attributes, data_frame, description=''):
"""
Converts Pandas data frame to ARFF dictionary. This is only possible
if the data frame was previously converted from ARFF data because we
need the specific attribute information.
:param relation: Relation
:param attributes: ARFF attributes
:param data_frame: Data frame
:param description: Optional description
:return: ARFF data dictionary
"""
data = []
for row in data_frame.to_records(index=False):
data.append(list(row))
return {
'relation': relation,
'attributes': attributes,
'data': data,
'description': description
}
@staticmethod
def from_data_frame(relation, data_frame):
"""
Converts Pandas data frame to ARFF dictionary. Attribute types are
automatically inferred.
:param relation: Relation name
:param data_frame: Data frame
:return: ARFF data dictionary
"""
attributes = []
for name in data_frame.columns:
column = data_frame[name]
            if column.dtype == np.dtype('int') or column.dtype == np.dtype('float'):
                attributes.append((name, 'NUMERIC'))
            elif column.dtype == np.dtype('object'):
                attributes.append((name, 'STRING'))
            elif column.dtype.name == 'category':
                attributes.append((name, list(column.cat.categories)))
data = []
for row in data_frame.to_records(index=False):
data.append(list(row))
return {
'relation': relation,
'attributes': attributes,
'data': data,
'description': 'Converted from Pandas data frame'
}
@staticmethod
    def test():
        df = ARFF.to_data_frame(
            ARFF.read('/Users/Ralph/datasets/pyminer/features.arff'))
        ARFF.from_data_frame('features', df)
@staticmethod
def write(file_name, data):
"""
Writes ARFF data dictionary to file.
:param file_name: File name
:param data: Data dictionary
:return:
"""
f = open(file_name, 'w')
arff.dump(data, f)
f.close()
@staticmethod
def write_csv(file_name, data):
"""
Writes ARFF data dictionary to CSV file. Note that this will
cause loss of attribute type information. The approach we take
here is to first convert to a Pandas data frame and then use the
Pandas built-in function to export to CSV.
:param file_name: CSV file name
:param data: Data dictionary
:return:
"""
data_frame = ARFF.to_data_frame(data)
data_frame.to_csv(file_name, na_rep='?', header=True, index=False, sep=',')
@staticmethod
def append(data1, data2):
"""
Appends contents of ARFF data dictionary 'data2' to the contents
of data dictionary 'data1'. Obviously, the attributes and types
must correspond exactly.
:param data1: Base data dictionary.
:param data2: Dictionary to append
:return: Updated dictionary
"""
# Use description of data1
description = data1['description']
# Check whether we have matching attributes
attributes1 = data1['attributes']
attributes2 = data2['attributes']
if not len(attributes1) == len(attributes2):
raise RuntimeError('Mismatch number of attributes')
for i in range(len(attributes1)):
attribute1 = attributes1[i]
attribute2 = attributes2[i]
if not len(attribute1) == 2:
raise RuntimeError('Number of attribute1 items != 2')
if not len(attribute2) == 2:
raise RuntimeError('Number of attribute2 items != 2')
            if str(attribute1[0]) != str(attribute2[0]):
                raise RuntimeError('Mismatching names at ' + str(i) + ' (' + attribute1[0] + ' vs ' + attribute2[0] + ')')
            if type(attribute1[1]) is list and type(attribute2[1]) is list:
                for j in range(len(attribute1[1])):
                    if str(attribute1[1][j]) != str(attribute2[1][j]):
                        raise RuntimeError('Mismatching nominal values at ('
                                           + str(i) + ',' + str(j) + ') (' + str(attribute1[1][j]) + ' vs ' +
                                           str(attribute2[1][j]) + ')')
            elif str(attribute1[1]) != str(attribute2[1]):
                raise RuntimeError('Mismatching attribute types (' +
                                   str(attribute1[1]) + ' vs ' + str(attribute2[1]) + ')')
# Append rows of data2 to rows of data1
data = []
data.extend(data1['data'])
data.extend(data2['data'])
return {
            'relation': data1['relation'],
'attributes': attributes1,
'data': data,
'description': description
}
@staticmethod
def merge(data1, data2, join_by, attributes):
"""
Merges two data sets by appending the columns of data2 associated
with given attributes to data1. Rows are matched based on the
join_by attribute.
:param data1: Original data set
:param data2: Data set whose columns to add
:param join_by: Attribute for matching data rows
:param attributes: Attributes to add
:return: New data set
"""
# Check that both data sets have the merge attribute otherwise we
# can never match rows from one with rows from the other
if not ARFF.contains(data1, join_by):
raise RuntimeError('Attribute ' + join_by + ' missing from data1')
if not ARFF.contains(data2, join_by):
raise RuntimeError('Attribute ' + join_by + ' missing from data2')
# Check that data2 has the given attributes
for attribute in attributes:
if not ARFF.contains(data2, attribute):
raise RuntimeError('Attribute ' + attribute + ' missing from data2')
# Check that data1 does not have the given attributes
for attribute in attributes:
if ARFF.contains(data1, attribute):
raise RuntimeError('Attribute ' + attribute + ' already exists in data1')
# Get index of join_by attribute in both data sets. Then, create a
# lookup table for data2 based on join_by attribute as key. This
# allows quick access to data rows of data2. If we iterate through
# the rows of data1, we can get the join_by attribute value using
# the join_idx1 index. Using the attribute value we can then lookup
# the corresponding data row in data2.
join_idx1 = ARFF.index_of(data1, join_by)
join_idx2 = ARFF.index_of(data2, join_by)
data2_lookup = {}
        for data_row2 in data2['data']:
            data2_lookup[data_row2[join_idx2]] = data_row2
# Get indexes associated with given attributes in data2. We need this
# to efficiently access specific values in the rows of data2
attribute_indexes = []
for attribute in attributes:
attribute_indexes.append(ARFF.index_of(data2, attribute))
# Create new attribute set by appending the attributes of
# data set data2. We already checked there are no duplicates.
attributes_extended = data1['attributes']
for i in attribute_indexes:
attribute = data2['attributes'][i]
attributes_extended.append(attribute)
# Create new data rows by taking the original data row and
# appending the values corresponding to the attribute columns from
# data2. We can do this efficiently because of the lookup table we
# created earlier.
data = []
for i in range(len(data1['data'])):
data_row = data1['data'][i]
key = data_row[join_idx1]
if key not in data2_lookup:
print('WARNING: row with id {} not present in data2'.format(key))
continue
data_row2 = data2_lookup[data_row[join_idx1]]
for j in attribute_indexes:
data_row.append(data_row2[j])
data.append(data_row)
return {
'relation': data1['relation'],
'attributes': attributes_extended,
'data': data,
'description': ''
}
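    # Example (sketch; the attribute names are hypothetical): append the
    # 'age' and 'score' columns of data2 to data1, matching rows on 'id':
    #   merged = ARFF.merge(data1, data2, join_by='id',
    #                       attributes=['age', 'score'])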
@staticmethod
def dummy_encode(data, attribute):
"""
Applies a 1-of-k dummy encoding to the given attribute and replaces
the associated column with two or more dummy columns. Note that if
there are only two levels, they are just converted to zero and one
instead of creating new columns for them.
:param data: ARFF data dictionary
:param attribute: Nominal attribute
:return: Dummy encoded data dictionary, new attributes
"""
# Check that the attribute is actually nominal. If not, just
# return the data unchanged
if not ARFF.is_nominal(data, attribute):
return data
# Get index of given attribute. We need it when we insert
# additional dummy columns.
idx = ARFF.index_of(data, attribute)
# Get attribute values
attr_values = data['attributes'][idx][1]
if len(attr_values) == 2:
# If we're dealing with a binominal attribute there's no need
# to split it up in separate dummy columns. Just convert the
# values to 0's and 1's.
data['attributes'][idx] = (attribute, 'NUMERIC')
data_rows = data['data']
for i in range(len(data_rows)):
value = data_rows[i][idx]
if value == attr_values[0]:
data_rows[i][idx] = 0
else:
data_rows[i][idx] = 1
return data, [attribute]
else:
# Next, delete the original attribute and insert new attributes
# for each attribute value we encounter
del data['attributes'][idx]
for attr_value in reversed(attr_values):
data['attributes'].insert(idx, (attr_value, 'NUMERIC'))
# Insert dummy values into each data row depending on its
# original value in the attribute column
data_rows = data['data']
for i in range(len(data_rows)):
value = data_rows[i][idx]
first = True
for attr_value in reversed(attr_values):
if first:
data_rows[i][idx] = 0
first = False
else:
data_rows[i].insert(idx, 0)
if value == attr_value:
data_rows[i][idx] = 1
return data, attr_values
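    # Example (sketch; the attribute and its labels are hypothetical): a
    # nominal attribute 'color' with labels ['red', 'green', 'blue'] is
    # replaced by three NUMERIC columns, exactly one of which is 1 per row;
    # a two-label attribute is instead recoded in place as 0/1:
    #   data, new_columns = ARFF.dummy_encode(data, 'color')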
@staticmethod
def contains(data, attribute):
"""
Checks whether given attribute is in data dictionary.
:param data: Data dictionary
:param attribute: Attribute to check
:return: True/False
"""
return ARFF.index_of(data, attribute) > -1
@staticmethod
def index_of(data, attribute):
"""
Returns index of given attribute or -1 if not found.
:param data: Data dictionary
:param attribute: Attribute to search
:return: Index or -1
"""
for i in range(len(data['attributes'])):
item = data['attributes'][i][0]
if item == attribute:
return i
return -1
@staticmethod
def type_of(data, attribute):
"""
Returns type of given attribute or None if attribute is
of nominal type. In that case, use labels_of()
:param data: Data dictionary
:param attribute: Attribute to return type of
:return: Attribute type
"""
i = ARFF.index_of(data, attribute)
if i < 0:
return None
attribute_value = data['attributes'][i][1]
if isinstance(attribute_value, list):
print('WARNING: attribute value is nominal')
return None
else:
return attribute_value
@staticmethod
def labels_of(data, attribute):
"""
Returns labels of given nominal attribute or None if
attribute is not of nominal type.
:param data: Data dictionary
:param attribute: Attribute to return labels of
:return: Labels
"""
i = ARFF.index_of(data, attribute)
if i < 0:
return None
attribute_values = data['attributes'][i][1]
if not isinstance(attribute_values, list):
print('WARNING: attribute value is not of type nominal')
return None
else:
return attribute_values
@staticmethod
def sort_by(data, attribute):
"""
Sorts data by given attribute.
:param data: ARFF data dictionary
:param attribute: Attribute to sort by
:return: Sorted dictionary
"""
i = ARFF.index_of(data, attribute)
if i < 0:
raise RuntimeError('Attribute not found')
data['data'].sort(key=lambda tup: tup[i])
return data
@staticmethod
def is_nominal(data, attribute):
"""
Checks whether given attribute name corresponds to
nominal attribute or not.
:param data: ARFF data dictionary
:param attribute: Attribute to check
"""
i = ARFF.index_of(data, attribute)
if i < 0:
raise RuntimeError('Attribute not found')
attribute_value = data['attributes'][i][1]
if type(attribute_value) is list:
return True
return False
if __name__ == '__main__':
ARFF.test() | apache-2.0 |
hyperspy/hyperspy | hyperspy/component.py | 2 | 50888 | # -*- coding: utf-8 -*-
# Copyright 2007-2022 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <https://www.gnu.org/licenses/#GPL>.
import numpy as np
from dask.array import Array as dArray
import traits.api as t
from traits.trait_numeric import Array
import sympy
from sympy.utilities.lambdify import lambdify
from packaging.version import Version
from pathlib import Path
import hyperspy
from hyperspy.misc.utils import slugify, is_binned
from hyperspy.misc.io.tools import (incremental_filename,
append2pathname,)
from hyperspy.misc.export_dictionary import export_to_dictionary, \
load_from_dictionary
from hyperspy.events import Events, Event
from hyperspy.ui_registry import add_gui_method
from IPython.display import display_pretty, display
from hyperspy.misc.model_tools import current_component_values
from hyperspy.misc.utils import get_object_package_info
import logging
_logger = logging.getLogger(__name__)
class NoneFloat(t.CFloat): # Lazy solution, but usable
default_value = None
def validate(self, object, name, value):
if value == "None" or value == b"None":
value = None
if value is None:
super(NoneFloat, self).validate(object, name, 0)
return None
return super(NoneFloat, self).validate(object, name, value)
@add_gui_method(toolkey="hyperspy.Parameter")
class Parameter(t.HasTraits):
"""Model parameter
Attributes
----------
value : float or array
The value of the parameter for the current location. The value
for other locations is stored in map.
bmin, bmax: float
Lower and upper bounds of the parameter value.
twin : {None, Parameter}
If it is not None, the value of the current parameter is
a function of the given Parameter. The function is by default
the identity function, but it can be defined by twin_function
twin_function_expr: str
Expression of the ``twin_function`` that enables setting a functional
relationship between the parameter and its twin. If ``twin`` is not
``None``, the parameter value is calculated as the output of calling the
twin function with the value of the twin parameter. The string is
parsed using sympy, so permitted values are any valid sympy expressions
of one variable. If the function is invertible the twin inverse function
is set automatically.
twin_inverse_function_expr : str
Expression of the ``twin_inverse_function`` that enables setting the
value of the twin parameter. If ``twin`` is not
``None``, its value is set to the output of calling the
twin inverse function with the value provided. The string is
parsed using sympy, so permitted values are any valid sympy expressions
of one variable.
twin_function : function
**Setting this attribute manually
is deprecated in HyperSpy newer than 1.1.2. It will become private in
HyperSpy 2.0. Please use ``twin_function_expr`` instead.**
twin_inverse_function : function
**Setting this attribute manually
is deprecated in HyperSpy newer than 1.1.2. It will become private in
HyperSpy 2.0. Please use ``twin_inverse_function_expr`` instead.**
ext_force_positive : bool
If True, the parameter value is set to be the absolute value
of the input value i.e. if we set Parameter.value = -3, the
value stored is 3 instead. This is useful to bound a value
to be positive in an optimization without actually using an
optimizer that supports bounding.
ext_bounded : bool
Similar to ext_force_positive, but in this case the bounds are
defined by bmin and bmax. It is a better idea to use
an optimizer that supports bounding though.
Methods
-------
connect, disconnect(function)
Call the functions connected when the value attribute changes.
"""
__number_of_elements = 1
__value = 0
_free = True
_bounds = (None, None)
__twin = None
_axes_manager = None
__ext_bounded = False
__ext_force_positive = False
# traitsui bugs out trying to make an editor for this, so always specify!
# (it bugs out, because both editor shares the object, and Array editors
# don't like non-sequence objects). TextEditor() works well, so does
# RangeEditor() as it works with bmin/bmax.
value = t.Property(t.Either([t.CFloat(0), Array()]))
units = t.Str('')
free = t.Property(t.CBool(True))
bmin = t.Property(NoneFloat(), label="Lower bounds")
bmax = t.Property(NoneFloat(), label="Upper bounds")
_twin_function_expr = ""
_twin_inverse_function_expr = ""
twin_function = None
_twin_inverse_function = None
_twin_inverse_sympy = None
def __init__(self):
self._twins = set()
self.events = Events()
self.events.value_changed = Event("""
Event that triggers when the `Parameter.value` changes.
The event triggers after the internal state of the `Parameter` has
been updated.
Arguments
---------
obj : Parameter
The `Parameter` that the event belongs to
value : {float | array}
The new value of the parameter
""", arguments=["obj", 'value'])
self.std = None
self.component = None
self.grad = None
self.name = ''
self.units = ''
self._linear = False
self.map = None
self.model = None
self._whitelist = {'_id_name': None,
'value': None,
'std': None,
'free': None,
'units': None,
'map': None,
'_bounds': None,
'ext_bounded': None,
'name': None,
'_linear':None,
'ext_force_positive': None,
'twin_function_expr': None,
'twin_inverse_function_expr': None,
'self': ('id', None),
}
self._slicing_whitelist = {'map': 'inav'}
def _load_dictionary(self, dictionary):
"""Load data from dictionary.
Parameters
----------
dict : dict
A dictionary containing at least the following fields:
* _id_name: ``_id_name`` of the original parameter, used to create
the dictionary. Has to match with the ``self._id_name``.
* _whitelist: a dictionary, which keys are used as keywords to
match with the parameter attributes. For more information see
:py:func:`~hyperspy.misc.export_dictionary.load_from_dictionary`
* any field from ``_whitelist.keys()``.
Returns
-------
id_value : int
the ID value of the original parameter, to be later used for
setting up the correct twins
"""
if dictionary['_id_name'] == self._id_name:
load_from_dictionary(self, dictionary)
return dictionary['self']
else:
raise ValueError("_id_name of parameter and dictionary do not match, \nparameter._id_name = %s\
\ndictionary['_id_name'] = %s" % (self._id_name, dictionary['_id_name']))
def __repr__(self):
text = ''
text += 'Parameter %s' % self.name
if self.component is not None:
text += ' of %s' % self.component._get_short_description()
text = '<' + text + '>'
return text
def __len__(self):
return self._number_of_elements
@property
def twin_function_expr(self):
return self._twin_function_expr
@twin_function_expr.setter
def twin_function_expr(self, value):
if not value:
self.twin_function = None
self.twin_inverse_function = None
self._twin_function_expr = ""
self._twin_inverse_sympy = None
return
expr = sympy.sympify(value)
if len(expr.free_symbols) > 1:
raise ValueError("The expression must contain only one variable.")
elif len(expr.free_symbols) == 0:
raise ValueError("The expression must contain one variable, "
"it contains none.")
x = tuple(expr.free_symbols)[0]
self.twin_function = lambdify(x, expr.evalf())
self._twin_function_expr = value
if not self.twin_inverse_function:
y = sympy.Symbol(x.name + "2")
try:
inv = list(sympy.solveset(sympy.Eq(y, expr), x))
self._twin_inverse_sympy = lambdify(y, inv)
self._twin_inverse_function = None
except BaseException:
# Not all may have a suitable solution.
self._twin_inverse_function = None
self._twin_inverse_sympy = None
_logger.warning(
"The function {} is not invertible. Setting the value of "
"{} will raise an AttributeError unless you set manually "
"``twin_inverse_function_expr``. Otherwise, set the "
"value of its twin parameter instead.".format(value, self))
@property
def twin_inverse_function_expr(self):
if self.twin:
return self._twin_inverse_function_expr
else:
return ""
@twin_inverse_function_expr.setter
def twin_inverse_function_expr(self, value):
if not value:
self.twin_inverse_function = None
self._twin_inverse_function_expr = ""
return
expr = sympy.sympify(value)
if len(expr.free_symbols) > 1:
raise ValueError("The expression must contain only one variable.")
elif len(expr.free_symbols) == 0:
raise ValueError("The expression must contain one variable, "
"it contains none.")
x = tuple(expr.free_symbols)[0]
self._twin_inverse_function = lambdify(x, expr.evalf())
self._twin_inverse_function_expr = value
@property
def twin_inverse_function(self):
if (not self.twin_inverse_function_expr and
self.twin_function_expr and self._twin_inverse_sympy):
return lambda x: self._twin_inverse_sympy(x).pop()
else:
return self._twin_inverse_function
@twin_inverse_function.setter
def twin_inverse_function(self, value):
self._twin_inverse_function = value
def _get_value(self):
if self.twin is None:
return self.__value
else:
if self.twin_function:
return self.twin_function(self.twin.value)
else:
return self.twin.value
def _set_value(self, value):
try:
# Use try/except instead of hasattr("__len__") because a numpy
# memmap has a __len__ wrapper even for numbers that raises a
# TypeError when calling. See issue #349.
if len(value) != self._number_of_elements:
                raise ValueError(
                    "The length of the parameter must be %s" %
                    self._number_of_elements)
else:
if not isinstance(value, tuple):
value = tuple(value)
except TypeError:
if self._number_of_elements != 1:
                raise ValueError(
                    "The length of the parameter must be %s" %
                    self._number_of_elements)
old_value = self.__value
if self.twin is not None:
if self.twin_function is not None:
if self.twin_inverse_function is not None:
self.twin.value = self.twin_inverse_function(value)
return
else:
raise AttributeError(
"This parameter has a ``twin_function`` but"
"its ``twin_inverse_function`` is not defined.")
else:
self.twin.value = value
return
if self.ext_bounded is False:
self.__value = value
else:
if self.ext_force_positive is True:
value = np.abs(value)
if self._number_of_elements == 1:
if self.bmin is not None and value <= self.bmin:
self.__value = self.bmin
elif self.bmax is not None and value >= self.bmax:
self.__value = self.bmax
else:
self.__value = value
else:
                bmin = (self.bmin if self.bmin is not None
                        else -np.inf)
                bmax = (self.bmax if self.bmax is not None
                        else np.inf)
self.__value = np.clip(value, bmin, bmax)
if (self._number_of_elements != 1 and
not isinstance(self.__value, tuple)):
self.__value = tuple(self.__value)
if old_value != self.__value:
self.events.value_changed.trigger(value=self.__value,
obj=self)
self.trait_property_changed('value', old_value, self.__value)
# Fix the parameter when coupled
def _get_free(self):
if self.twin is None:
return self._free
else:
return False
def _set_free(self, arg):
if arg and self.twin:
raise ValueError(f"Parameter {self.name} can't be set free "
"is twinned with {self.twin}.")
old_value = self._free
self._free = arg
if self.component is not None:
self.component._update_free_parameters()
self.trait_property_changed('free', old_value, self._free)
def _on_twin_update(self, value, twin=None):
if (twin is not None
and hasattr(twin, 'events')
and hasattr(twin.events, 'value_changed')):
with twin.events.value_changed.suppress_callback(
self._on_twin_update):
self.events.value_changed.trigger(value=value, obj=self)
else:
self.events.value_changed.trigger(value=value, obj=self)
def _set_twin(self, arg):
if arg is None:
if self.twin is not None:
# Store the value of the twin in order to set the
# value of the parameter when it is uncoupled
twin_value = self.value
if self in self.twin._twins:
self.twin._twins.remove(self)
self.twin.events.value_changed.disconnect(
self._on_twin_update)
self.__twin = arg
self.value = twin_value
else:
if self not in arg._twins:
arg._twins.add(self)
arg.events.value_changed.connect(self._on_twin_update,
["value"])
self.__twin = arg
if self.component is not None:
self.component._update_free_parameters()
def _get_twin(self):
return self.__twin
twin = property(_get_twin, _set_twin)
def _get_bmin(self):
if self._number_of_elements == 1:
return self._bounds[0]
else:
return self._bounds[0][0]
def _set_bmin(self, arg):
old_value = self.bmin
if self._number_of_elements == 1:
self._bounds = (arg, self.bmax)
else:
self._bounds = ((arg, self.bmax),) * self._number_of_elements
# Update the value to take into account the new bounds
self.value = self.value
self.trait_property_changed('bmin', old_value, arg)
def _get_bmax(self):
if self._number_of_elements == 1:
return self._bounds[1]
else:
return self._bounds[0][1]
def _set_bmax(self, arg):
old_value = self.bmax
if self._number_of_elements == 1:
self._bounds = (self.bmin, arg)
else:
self._bounds = ((self.bmin, arg),) * self._number_of_elements
# Update the value to take into account the new bounds
self.value = self.value
self.trait_property_changed('bmax', old_value, arg)
@property
def _number_of_elements(self):
return self.__number_of_elements
@_number_of_elements.setter
def _number_of_elements(self, arg):
# Do nothing if the number of arguments stays the same
if self.__number_of_elements == arg:
return
if arg < 1:
raise ValueError("Please provide an integer number equal "
"or greater to 1")
self._bounds = ((self.bmin, self.bmax),) * arg
self.__number_of_elements = arg
if arg == 1:
self._Parameter__value = 0
else:
self._Parameter__value = (0,) * arg
if self.component is not None:
self.component.update_number_parameters()
@property
def ext_bounded(self):
return self.__ext_bounded
@ext_bounded.setter
def ext_bounded(self, arg):
if arg is not self.__ext_bounded:
self.__ext_bounded = arg
# Update the value to take into account the new bounds
self.value = self.value
@property
def ext_force_positive(self):
return self.__ext_force_positive
@ext_force_positive.setter
def ext_force_positive(self, arg):
if arg is not self.__ext_force_positive:
self.__ext_force_positive = arg
# Update the value to take into account the new bounds
self.value = self.value
def store_current_value_in_array(self):
"""Store the value and std attributes.
See also
--------
fetch, assign_current_value_to_all
"""
indices = self._axes_manager.indices[::-1]
# If it is a single spectrum indices is ()
if not indices:
indices = (0,)
self.map['values'][indices] = self.value
self.map['is_set'][indices] = True
if self.std is not None:
self.map['std'][indices] = self.std
def fetch(self):
"""Fetch the stored value and std attributes from the
parameter.map['values'] and ...['std'] if
`parameter.map['is_set']` is True for that index. Updates
`parameter.value` and `parameter.std`.
        If not stored, `value` and `std` keep their previous values,
        i.e. those from a fit in a previous pixel.
See Also
--------
store_current_value_in_array, assign_current_value_to_all
"""
indices = self._axes_manager.indices[::-1]
# If it is a single spectrum indices is ()
if not indices:
indices = (0,)
if self.map['is_set'][indices]:
value = self.map['values'][indices]
std = self.map['std'][indices]
if isinstance(value, dArray):
value = value.compute()
if isinstance(std, dArray):
std = std.compute()
self.value = value
self.std = std
def assign_current_value_to_all(self, mask=None):
"""Assign the current value attribute to all the indices,
setting parameter.map for all parameters in the component.
Takes the current `parameter.value` and sets it for all
indices in `parameter.map['values']`.
Parameters
----------
mask: {None, boolean numpy array}
Set only the indices that are not masked i.e. where
mask is False.
See Also
--------
store_current_value_in_array, fetch
"""
if mask is None:
mask = np.zeros(self.map.shape, dtype='bool')
        self.map['values'][~mask] = self.value
        self.map['is_set'][~mask] = True
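    # Example (sketch; `bad_pixels` is a hypothetical boolean array with the
    # navigation shape): write the current value everywhere except the
    # masked positions:
    #   parameter.assign_current_value_to_all(mask=bad_pixels)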
def _create_array(self):
"""Create the map array to store the information in
multidimensional datasets.
"""
shape = self._axes_manager._navigation_shape_in_array
if not shape:
shape = [1, ]
# Shape-1 fields in dtypes won’t be collapsed to scalars in a future
# numpy version (see release notes numpy 1.17.0)
if self._number_of_elements > 1:
dtype_ = np.dtype([
('values', 'float', self._number_of_elements),
('std', 'float', self._number_of_elements),
('is_set', 'bool')])
else:
dtype_ = np.dtype([
('values', 'float'),
('std', 'float'),
('is_set', 'bool')])
if (self.map is None or self.map.shape != shape or
self.map.dtype != dtype_):
self.map = np.zeros(shape, dtype_)
self.map['std'].fill(np.nan)
# TODO: in the future this class should have access to
# axes manager and should be able to fetch its own
# values. Until then, the next line is necessary to avoid
        # errors when self.std is defined and the shape is different
# from the newly defined arrays
self.std = None
def as_signal(self, field='values'):
"""Get a parameter map as a signal object.
Please note that this method only works when the navigation
dimension is greater than 0.
Parameters
----------
field : {'values', 'std', 'is_set'}
Field to return as signal.
Raises
------
NavigationDimensionError
If the navigation dimension is 0
"""
from hyperspy.signal import BaseSignal
s = BaseSignal(data=self.map[field],
axes=self._axes_manager._get_navigation_axes_dicts())
if self.component is not None and \
self.component.active_is_multidimensional:
s.data[np.logical_not(self.component._active_array)] = np.nan
s.metadata.General.title = ("%s parameter" % self.name
if self.component is None
else "%s parameter of %s component" %
(self.name, self.component.name))
for axis in s.axes_manager._axes:
axis.navigate = False
if self._number_of_elements > 1:
s.axes_manager._append_axis(
size=self._number_of_elements,
name=self.name,
navigate=True)
s._assign_subclass()
if field == "values":
# Add the variance if available
std = self.as_signal(field="std")
if not np.isnan(std.data).all():
std.data = std.data ** 2
std.metadata.General.title = "Variance"
s.metadata.set_item(
"Signal.Noise_properties.variance", std)
return s
def plot(self, **kwargs):
"""Plot parameter signal.
Parameters
----------
**kwargs
Any extra keyword arguments are passed to the signal plot.
Example
-------
>>> parameter.plot() #doctest: +SKIP
Set the minimum and maximum displayed values
>>> parameter.plot(vmin=0, vmax=1) #doctest: +SKIP
"""
self.as_signal().plot(**kwargs)
def export(self, folder=None, name=None, format="hspy",
save_std=False):
"""Save the data to a file. All the arguments are optional.
Parameters
----------
folder : str or None
The path to the folder where the file will be saved.
If `None` the current folder is used by default.
name : str or None
            The name of the file. If `None`, the Component's name followed
            by the Parameter's `name` attribute will be used by default.
If a file with the same name exists the name will be
modified by appending a number to the file path.
save_std : bool
If True, also the standard deviation will be saved
format: str
The extension of any file format supported by HyperSpy, default
``hspy``.
"""
if format is None:
format = "hspy"
if name is None:
name = self.component.name + '_' + self.name
filename = incremental_filename(slugify(name) + '.' + format)
if folder is not None:
filename = Path(folder).joinpath(filename)
self.as_signal().save(filename)
if save_std is True:
self.as_signal(field='std').save(append2pathname(
filename, '_std'))
def as_dictionary(self, fullcopy=True):
"""Returns parameter as a dictionary, saving all attributes from
self._whitelist.keys() For more information see
py:meth:`~hyperspy.misc.export_dictionary.export_to_dictionary`
Parameters
----------
fullcopy : Bool (optional, False)
Copies of objects are stored, not references. If any found,
functions will be pickled and signals converted to dictionaries
Returns
-------
A dictionary, containing at least the following fields:
* _id_name: _id_name of the original parameter, used to create the
dictionary. Has to match with the self._id_name
* _twins: a list of ids of the twins of the parameter
* _whitelist: a dictionary, which keys are used as keywords to match
with the parameter attributes. For more information see
:py:func:`~hyperspy.misc.export_dictionary.export_to_dictionary`
* any field from _whitelist.keys()
"""
dic = {'_twins': [id(t) for t in self._twins]}
export_to_dictionary(self, self._whitelist, dic, fullcopy)
return dic
def default_traits_view(self):
# As mentioned above, the default editor for
# value = t.Property(t.Either([t.CFloat(0), Array()]))
# gives a ValueError. We therefore implement default_traits_view so
# that configure/edit_traits will still work straight out of the box.
# A whitelist controls which traits to include in this view.
from traitsui.api import RangeEditor, View, Item
whitelist = ['bmax', 'bmin', 'free', 'name', 'std', 'units', 'value']
editable_traits = [trait for trait in self.editable_traits()
if trait in whitelist]
if 'value' in editable_traits:
i = editable_traits.index('value')
v = editable_traits.pop(i)
editable_traits.insert(i, Item(
v, editor=RangeEditor(low_name='bmin', high_name='bmax')))
view = View(editable_traits, buttons=['OK', 'Cancel'])
return view
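# Minimal sketch of twinning two parameters through a sympy expression (the
# setup is illustrative, not taken from the HyperSpy documentation):
#   p, q = Parameter(), Parameter()
#   p.twin_function_expr = "2 * x"   # p.value evaluates as 2 * q.value
#   p.twin = q
#   q.value = 3                      # p.value -> 6; setting p.value updates q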
@add_gui_method(toolkey="hyperspy.Component")
class Component(t.HasTraits):
__axes_manager = None
active = t.Property(t.CBool(True))
name = t.Property(t.Str(''))
def __init__(self, parameter_name_list, linear_parameter_list=None):
self.events = Events()
self.events.active_changed = Event("""
Event that triggers when the `Component.active` changes.
The event triggers after the internal state of the `Component` has
been updated.
Arguments
---------
obj : Component
The `Component` that the event belongs to
active : bool
The new active state
""", arguments=["obj", 'active'])
self.parameters = []
self.init_parameters(parameter_name_list, linear_parameter_list)
self._update_free_parameters()
self.active = True
self._active_array = None # only if active_is_multidimensional is True
self.isbackground = False
self.convolved = True
self.parameters = tuple(self.parameters)
self._id_name = self.__class__.__name__
self._id_version = '1.0'
self._position = None
self.model = None
self.name = ''
self._whitelist = {'_id_name': None,
'name': None,
'active_is_multidimensional': None,
'_active_array': None,
'active': None
}
self._slicing_whitelist = {'_active_array': 'inav'}
self._slicing_order = ('active', 'active_is_multidimensional',
'_active_array',)
_name = ''
_active_is_multidimensional = False
_active = True
@property
def active_is_multidimensional(self):
"""In multidimensional signals it is possible to store the value of the
:py:attr:`~.component.Component.active` attribute at each navigation
index.
"""
return self._active_is_multidimensional
@active_is_multidimensional.setter
def active_is_multidimensional(self, value):
if not isinstance(value, bool):
raise ValueError('Only boolean values are permitted')
if value == self.active_is_multidimensional:
return
if value: # Turn on
if self._axes_manager.navigation_size < 2:
_logger.info('`navigation_size` < 2, skipping')
return
# Store value at current position
self._create_active_array()
self._store_active_value_in_array(self._active)
self._active_is_multidimensional = True
else: # Turn off
# Get the value at the current position before switching it off
self._active = self.active
self._active_array = None
self._active_is_multidimensional = False
def _get_name(self):
return self._name
def _set_name(self, value):
old_value = self._name
if old_value == value:
return
if self.model:
for component in self.model:
if value == component.name:
raise ValueError(
"Another component already has "
"the name " + str(value))
self._name = value
setattr(self.model.components, slugify(
value, valid_variable_name=True), self)
self.model.components.__delattr__(
slugify(old_value, valid_variable_name=True))
else:
self._name = value
self.trait_property_changed('name', old_value, self._name)
@property
def _axes_manager(self):
return self.__axes_manager
@_axes_manager.setter
def _axes_manager(self, value):
for parameter in self.parameters:
parameter._axes_manager = value
self.__axes_manager = value
@property
def _is_navigation_multidimensional(self):
if (self._axes_manager is None or not
self._axes_manager.navigation_dimension):
return False
else:
return True
def _get_active(self):
if self.active_is_multidimensional is True:
            # Reading the stored value for the current navigation indices and
            # assigning it via the property setter keeps _active in sync and
            # triggers the connected callbacks.
self.active = self._active_array[self._axes_manager.indices[::-1]]
return self._active
def _store_active_value_in_array(self, value):
self._active_array[self._axes_manager.indices[::-1]] = value
def _set_active(self, arg):
if self._active == arg:
return
old_value = self._active
self._active = arg
if self.active_is_multidimensional is True:
self._store_active_value_in_array(arg)
self.events.active_changed.trigger(active=self._active, obj=self)
self.trait_property_changed('active', old_value, self._active)
def init_parameters(self, parameter_name_list, linear_parameter_list=None):
"""
Initialise the parameters of the component.
Parameters
----------
parameter_name_list : list
The list of parameter names.
linear_parameter_list : list, optional
The list of linear parameter. The default is None.
Returns
-------
None.
"""
if linear_parameter_list is None:
linear_parameter_list = []
for name in parameter_name_list:
parameter = Parameter()
self.parameters.append(parameter)
parameter.name = name
if name in linear_parameter_list:
parameter._linear = True
parameter._id_name = name
setattr(self, name, parameter)
if hasattr(self, 'grad_' + name):
parameter.grad = getattr(self, 'grad_' + name)
parameter.component = self
self.add_trait(name, t.Instance(Parameter))
def _get_long_description(self):
if self.name:
text = '%s (%s component)' % (self.name, self.__class__.__name__)
else:
text = '%s component' % self.__class__.__name__
return text
def _get_short_description(self):
text = ''
if self.name:
text += self.name
else:
text += self.__class__.__name__
text += ' component'
return text
def __repr__(self):
text = '<%s>' % self._get_long_description()
return text
def _update_free_parameters(self):
self.free_parameters = sorted([par for par in self.parameters if
par.free], key=lambda x: x.name)
self._nfree_param = sum([par._number_of_elements for par in
self.free_parameters])
def update_number_parameters(self):
i = 0
for parameter in self.parameters:
i += parameter._number_of_elements
self.nparam = i
self._update_free_parameters()
def fetch_values_from_array(self, p, p_std=None, onlyfree=False):
"""Fetch the parameter values from an array `p` and optionally standard
deviation from `p_std`. Places them `component.parameter.value` and
`...std`, according to their position in the component.
Parameters
----------
p : array
array containing new values for the parameters in a component
p_std : array, optional
array containing the corresponding standard deviation.
"""
if onlyfree is True:
parameters = self.free_parameters
else:
parameters = self.parameters
i = 0
for parameter in sorted(parameters, key=lambda x: x.name):
length = parameter._number_of_elements
parameter.value = (p[i] if length == 1 else p[i:i + length])
if p_std is not None:
parameter.std = (p_std[i] if length == 1 else
tuple(p_std[i:i + length]))
i += length
def _create_active_array(self):
shape = self._axes_manager._navigation_shape_in_array
if len(shape) == 1 and shape[0] == 0:
shape = [1, ]
if (not isinstance(self._active_array, np.ndarray)
or self._active_array.shape != shape):
_logger.debug(f'Creating _active_array for {self}.\n\tCurrent '
f'array is:\n{self._active_array}')
self._active_array = np.ones(shape, dtype=bool)
def _create_arrays(self):
if self.active_is_multidimensional:
self._create_active_array()
for parameter in self.parameters:
parameter._create_array()
def store_current_parameters_in_map(self):
for parameter in self.parameters:
parameter.store_current_value_in_array()
def fetch_stored_values(self, only_fixed=False):
if self.active_is_multidimensional:
# Store the stored value in self._active and trigger the connected
# functions.
self.active = self.active
if only_fixed is True:
parameters = (set(self.parameters) -
set(self.free_parameters))
else:
parameters = self.parameters
parameters = [parameter for parameter in parameters
if (parameter.twin is None or
not isinstance(parameter.twin, Parameter))]
for parameter in parameters:
parameter.fetch()
def plot(self, only_free=True):
"""Plot the value of the parameters of the model
Parameters
----------
only_free : bool
If True, only the value of the parameters that are free will
be plotted
"""
if only_free:
parameters = self.free_parameters
else:
parameters = self.parameters
parameters = [k for k in parameters if k.twin is None]
for parameter in parameters:
parameter.plot()
def export(self, folder=None, format="hspy", save_std=False,
only_free=True):
"""Plot the value of the parameters of the model
Parameters
----------
folder : str or None
The path to the folder where the file will be saved. If
`None` the current folder is used by default.
format : str
The extension of the file format, default "hspy".
save_std : bool
If True, also the standard deviation will be saved.
only_free : bool
If True, only the value of the parameters that are free will be
exported.
Notes
-----
        The name of the files will be determined by the Component and
        Parameter `name` attributes. Therefore, it is possible to
        customise the file names by modifying the `name` attributes.
"""
if only_free:
parameters = self.free_parameters
else:
parameters = self.parameters
parameters = [k for k in parameters if k.twin is None]
for parameter in parameters:
parameter.export(folder=folder, format=format,
save_std=save_std,)
def summary(self):
for parameter in self.parameters:
dim = len(parameter.map.squeeze().shape) if parameter.map \
is not None else 0
if parameter.twin is None:
if dim <= 1:
print('%s = %s ± %s %s' % (parameter.name,
parameter.value,
parameter.std,
parameter.units))
def __call__(self):
"""Returns the corresponding model for the current coordinates
Returns
-------
numpy array
"""
axis = self.model.axis.axis[self.model.channel_switches]
component_array = self.function(axis)
return component_array
def _component2plot(self, axes_manager, out_of_range2nans=True):
old_axes_manager = None
if axes_manager is not self.model.axes_manager:
old_axes_manager = self.model.axes_manager
self.model.axes_manager = axes_manager
self.fetch_stored_values()
s = self.model.__call__(component_list=[self])
if not self.active:
s.fill(np.nan)
if old_axes_manager is not None:
self.model.axes_manager = old_axes_manager
self.charge()
if out_of_range2nans is True:
ns = np.empty(self.model.axis.axis.shape)
ns.fill(np.nan)
ns[self.model.channel_switches] = s
s = ns
if old_axes_manager is not None:
self.model.axes_manager = old_axes_manager
self.fetch_stored_values()
return s
def set_parameters_free(self, parameter_name_list=None, only_linear=False, only_nonlinear=False):
"""
Sets parameters in a component to free.
Parameters
----------
parameter_name_list : None or list of str, optional
If None, will set all the parameters to free.
If list of strings, will set all the parameters with the same name
as the strings in parameter_name_list to free.
only_linear : bool
If True, only sets a parameter free if it is linear
only_nonlinear : bool
If True, only sets a parameter free if it is nonlinear
Examples
--------
>>> v1 = hs.model.components1D.Voigt()
>>> v1.set_parameters_free()
>>> v1.set_parameters_free(parameter_name_list=['area','centre'])
        >>> v1.set_parameters_free(only_linear=True)
See also
--------
set_parameters_not_free
hyperspy.model.BaseModel.set_parameters_free
hyperspy.model.BaseModel.set_parameters_not_free
"""
if only_linear and only_nonlinear:
raise ValueError(
"To set all parameters free, set both `only_linear` and "
"`only_nonlinear` to False."
)
parameter_list = []
if not parameter_name_list:
parameter_list = self.parameters
else:
for _parameter in self.parameters:
if _parameter.name in parameter_name_list:
parameter_list.append(_parameter)
for _parameter in parameter_list:
if not only_linear and not only_nonlinear:
_parameter.free = True
elif only_linear and _parameter._linear:
_parameter.free = True
elif only_nonlinear and not _parameter._linear:
_parameter.free = True
def set_parameters_not_free(self, parameter_name_list=None,
only_linear=False, only_nonlinear=False):
"""
Sets parameters in a component to not free.
Parameters
----------
parameter_name_list : None or list of str, optional
If None, will set all the parameters to not free.
If list of strings, will set all the parameters with the same name
as the strings in parameter_name_list to not free.
only_linear : bool
If True, only sets a parameter not free if it is linear
only_nonlinear : bool
If True, only sets a parameter not free if it is nonlinear
Examples
--------
>>> v1 = hs.model.components1D.Voigt()
>>> v1.set_parameters_not_free()
>>> v1.set_parameters_not_free(parameter_name_list=['area','centre'])
>>> v1.set_parameters_not_free(only_linear=True)
See also
--------
set_parameters_free
hyperspy.model.BaseModel.set_parameters_free
hyperspy.model.BaseModel.set_parameters_not_free
"""
if only_linear and only_nonlinear:
            raise ValueError(
                "To set all parameters not free, set both `only_linear` "
                "and `only_nonlinear` to False.")
parameter_list = []
if not parameter_name_list:
parameter_list = self.parameters
else:
for _parameter in self.parameters:
if _parameter.name in parameter_name_list:
parameter_list.append(_parameter)
for _parameter in parameter_list:
if not only_linear and not only_nonlinear:
_parameter.free = False
elif only_linear and _parameter._linear:
_parameter.free = False
elif only_nonlinear and not _parameter._linear:
_parameter.free = False
def _estimate_parameters(self, signal):
if self._axes_manager != signal.axes_manager:
self._axes_manager = signal.axes_manager
self._create_arrays()
def as_dictionary(self, fullcopy=True):
"""
Returns component as a dictionary. For more information on method
and conventions, see
:py:meth:`~hyperspy.misc.export_dictionary.export_to_dictionary`
Parameters
----------
fullcopy : Bool (optional, False)
Copies of objects are stored, not references. If any found,
functions will be pickled and signals converted to dictionaries
Returns
-------
dic : dict
A dictionary, containing at least the following fields:
* parameters: a list of dictionaries of the parameters, one per
component.
* _whitelist: a dictionary with keys used as references saved
attributes, for more information, see
:py:func:`~hyperspy.misc.export_dictionary.export_to_dictionary`
* any field from _whitelist.keys()
"""
dic = {
'parameters': [
p.as_dictionary(fullcopy) for p in self.parameters]}
dic.update(get_object_package_info(self))
export_to_dictionary(self, self._whitelist, dic, fullcopy)
from hyperspy.model import _COMPONENTS
if self._id_name not in _COMPONENTS:
import dill
dic['_class_dump'] = dill.dumps(self.__class__)
return dic
def _load_dictionary(self, dic):
"""
Load data from dictionary.
Parameters
----------
dict : dict
A dictionary containing at least the following fields:
* _id_name: _id_name of the original parameter, used to create the
dictionary. Has to match with the self._id_name
* parameters: a list of dictionaries, one per parameter of the
component (see
:py:meth:`~hyperspy.component.Parameter.as_dictionary`
documentation for more details)
* _whitelist: a dictionary, which keys are used as keywords to
match with the parameter attributes. For more information see
:py:func:`~hyperspy.misc.export_dictionary.load_from_dictionary`
* any field from _whitelist.keys()
Returns
-------
twin_dict : dict
Dictionary of 'id' values from input dictionary as keys with all of
the parameters of the component, to be later used for setting up
correct twins.
"""
if dic['_id_name'] == self._id_name:
if (self._id_name == "Polynomial" and
Version(hyperspy.__version__) >= Version("2.0")):
# in HyperSpy 2.0 the polynomial definition changed
from hyperspy._components.polynomial import convert_to_polynomial
dic = convert_to_polynomial(dic)
load_from_dictionary(self, dic)
id_dict = {}
for p in dic['parameters']:
idname = p['_id_name']
if hasattr(self, idname):
par = getattr(self, idname)
t_id = par._load_dictionary(p)
id_dict[t_id] = par
else:
raise ValueError(
"_id_name of parameters in component and dictionary do not match")
return id_dict
else:
raise ValueError("_id_name of component and dictionary do not match, \ncomponent._id_name = %s\
\ndictionary['_id_name'] = %s" % (self._id_name, dic['_id_name']))
def print_current_values(self, only_free=False, fancy=True):
"""
Prints the current values of the component's parameters.
Parameters
----------
only_free : bool
If True, only free parameters will be printed.
fancy : bool
If True, attempts to print using html rather than text in the notebook.
"""
if fancy:
display(current_component_values(self, only_free=only_free))
else:
display_pretty(current_component_values(self, only_free=only_free))
@property
def _constant_term(self):
"""
Get value of any (non-free) constant term of the component.
Returns 0 for most components.
"""
return 0
def _compute_constant_term(self):
"""Gets the value of any (non-free) constant term, with convolution"""
model = self.model
if model.convolved and self.convolved:
data = convolve_component_values(self._constant_term, model=model)
else:
signal_shape = model.axes_manager.signal_shape[::-1]
data = self._constant_term * np.ones(signal_shape)
return data.T[np.where(model.channel_switches)[::-1]].T
def convolve_component_values(component_values, model):
"""
Convolve component with model convolution axis.
Multiply by np.ones in order to handle case where component_values is a
single constant
"""
sig = component_values * np.ones(model.convolution_axis.shape)
ll = model.low_loss(model.axes_manager)
convolved = np.convolve(sig, ll, mode="valid")
return convolved
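# Minimal numeric sketch (assumed 1D shapes): for a constant value c and a
# low-loss spectrum ll of length N, the "valid" convolution of c * ones(K)
# with ll has length abs(N - K) + 1:
#   np.convolve(3.0 * np.ones(4), np.ones(10), mode="valid").shape  # (7,)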
def _get_scaling_factor(signal, axis, parameter):
"""
Convenience function to get the scaling factor required to take into
account binned and/or non-uniform axes.
Parameters
----------
signal : BaseSignal
axis : BaseDataAxis
parameter : float or numpy array
The axis value at which scaling factor is evaluated (ignored if the axis
is uniform)
Returns
-------
scaling_factor
"""
if is_binned(signal):
# in v2 replace by
#if axis.is_binned:
if axis.is_uniform:
scaling_factor = axis.scale
else:
parameter_idx = axis.value2index(parameter)
scaling_factor = np.gradient(axis.axis)[parameter_idx]
else:
scaling_factor = 1
return scaling_factor
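# Example (sketch; `signal` and `centre` are placeholders): for a binned
# uniform axis the factor is simply `axis.scale`; for a non-uniform axis it
# is the local spacing evaluated at the parameter value:
#   factor = _get_scaling_factor(signal, signal.axes_manager[-1], centre)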
| gpl-3.0 |
adamtiger/tensorflow | tensorflow/examples/learn/iris_custom_model.py | 37 | 3651 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
net = features[X_FEATURE]
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
net = tf.layers.dropout(net, rate=0.1)
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
  # Convert the labels to a one-hot tensor of shape (length of features, 3)
  # with an on-value of 1 for each one-hot vector of length 3.
onehot_labels = tf.one_hot(labels, 3, 1, 0)
# Compute loss.
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=1000)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
rahuldhote/scikit-learn | examples/feature_stacker.py | 245 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better to do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
| bsd-3-clause |
rahuldhote/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 345 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
lucafon/ArtificialIntelligence | src/classification_sklearn/problem3_3.py | 1 | 7154 | '''
Created on Mar 7, 2017
@author: Luca Fontanili
'''
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegressionCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
def svm_linear(dataset, out):
print('svm_linear')
X = dataset[['x', 'y']]
y = dataset.label
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
tuned_parameters = [{'kernel': ['linear'], 'C': [0.1, 0.5, 1, 5, 10, 50, 100]}]
clf = GridSearchCV(SVC(), tuned_parameters, cv=5)
clf.fit(X_train, y_train)
best_param = clf.best_params_
print('best param: ' + str(best_param))
# means = clf.cv_results_['mean_test_score']
# stds = clf.cv_results_['std_test_score']
# for mean, std, params in zip(means, stds, clf.cv_results_['params']):
# print("%0.3f (+/-%0.03f) for %r"
# % (mean, std * 2, params))
best_score = clf.best_score_
print('best score: ' + str(best_score))
print('test score: ' + str(clf.score(X_test,y_test)))
out.write('svm_linear,' + str(best_score) + ',' + str(clf.score(X_test,y_test)) + '\n')
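# Note: best_score_ is the mean cross-validated accuracy of the best
# parameter combination on the training split, not a held-out test score;
# the test score is computed separately via clf.score(X_test, y_test).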
def svm_polynomial(dataset, out):
print('svm_polynomial')
X = dataset[['x', 'y']]
y = dataset.label
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
# tuned_parameters = [{'kernel': ['poly'], 'C': [0.1, 1, 3], 'degree':[4, 5, 6], 'gamma':[0.1,1]}]
tuned_parameters = [{'kernel': ['poly'], 'C': [0.1,1,3], 'degree':[4,5,6], 'gamma':[0.1,1]}]
clf = GridSearchCV(SVC(), tuned_parameters, cv=5)
clf.fit(X_train, y_train)
best_param = clf.best_params_
print('best param: ' + str(best_param))
best_score = clf.best_score_
print('best score: ' + str(best_score))
print('test score: ' + str(clf.score(X_test,y_test)))
out.write('svm_polynomial,' + str(best_score) + ',' + str(clf.score(X_test,y_test)) + '\n')
def svm_rbf(dataset, out):
print('svm_rbf')
X = dataset[['x', 'y']]
y = dataset.label
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
tuned_parameters = [{'kernel': ['rbf'], 'C': [0.1, 0.5, 1, 5, 10, 50, 100], 'gamma':[0.1, 0.5, 1, 3, 6, 10]}]
clf = GridSearchCV(SVC(), tuned_parameters, cv=5)
clf.fit(X_train, y_train)
best_param = clf.best_params_
print('best param: ' + str(best_param))
best_score = clf.best_score_
print('best score: ' + str(best_score))
print('test score: ' + str(clf.score(X_test,y_test)))
out.write('svm_rbf,' + str(best_score) + ',' + str(clf.score(X_test,y_test)) + '\n')
def logistic(dataset, out):
print('logistic')
X = dataset[['x', 'y']]
y = dataset.label
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
logreg = LogisticRegressionCV(Cs =[0.1, 0.5, 1, 5, 10, 50, 100] , cv=5)
logreg.fit(X_train, y_train)
# scores_ is a dict keyed by class label; take the best CV accuracy found
print('best score: ' + str(logreg.scores_[1].max()))
print('test score: ' + str(logreg.score(X_test, y_test)))
out.write('logistic,' + str(logreg.scores_[1].max()) + ',' + str(logreg.score(X_test, y_test)) + '\n')
def knn(dataset, out):
print('knn')
X = dataset[['x', 'y']]
y = dataset.label
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
best_score = 0
best_neigh = None
for k in range(1,51):
for leaf_size in range(5,65,5):
neigh = KNeighborsClassifier(n_neighbors=k, leaf_size=leaf_size)
neigh.fit(X_train, y_train)
current_test_score = cross_val_score(neigh, X_train, y_train, cv=5).mean()
if current_test_score > best_score:
best_score = current_test_score
best_neigh = neigh
print('best score: ' + str(best_score))
test_score = best_neigh.score(X_test, y_test)
print('test score: ' + str(test_score))
out.write('knn,' + str(best_score) + ',' + str(test_score) + '\n')
def decision_tree(dataset, out):
print('decision_tree')
X = dataset[['x', 'y']]
y = dataset.label
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
best_score = 0
best_clf = None
for max_depth in range(1,51):
for min_sample in range(2,11):
clf = DecisionTreeClassifier(max_depth=max_depth, min_samples_split=min_sample)
clf.fit(X_train, y_train)
current_test_score = cross_val_score(clf, X_train, y_train, cv=5).mean()
if current_test_score > best_score:
best_score = current_test_score
best_clf = clf
print('best score: ' + str(best_score))
test_score = best_clf.score(X_test, y_test)
print('test score: ' + str(test_score))
out.write('decision_tree,' + str(best_score) + ',' + str(test_score) + '\n')
def random_forest(dataset, out):
print('random_forest')
X = dataset[['x', 'y']]
y = dataset.label
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
best_score = 0
best_clf = None
for max_depth in range(1,51):
for min_sample in range(2,11):
print('md: ', max_depth, ' ms: ', min_sample)
clf = RandomForestClassifier(max_depth=max_depth, min_samples_split=min_sample)
clf.fit(X_train, y_train)
current_test_score = cross_val_score(clf, X_train, y_train, cv=5).mean()
if current_test_score > best_score:
best_score = current_test_score
best_clf = clf
print('best score: ' + str(best_score))
test_score = best_clf.score(X_test, y_test)
print('test score: ' + str(test_score))
out.write('random_forest,' + str(best_score) + ',' + str(test_score))
def main():
inp = open('input3.csv', 'r')
out = open('output3.csv', 'w')
values = []
for line in inp:
if 'label' in line:
continue
params = line.strip().split(",")
values.append((float(params[0]),float(params[1]),int(params[2])))
dataset = pd.DataFrame(values, columns=['x', 'y', 'label'])
# dataset.plot(x='x', y='y', kind='scatter', c='label')
# plt.show()
svm_linear(dataset, out)
svm_polynomial(dataset, out)
svm_rbf(dataset, out)
logistic(dataset, out)
knn(dataset, out)
decision_tree(dataset, out)
random_forest(dataset, out)
# clf = SVC()
# clf.fit(X, y)
# SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, decision_function_shape=None, degree=3, gamma='auto', kernel='rbf', max_iter=-1, probability=False, random_state=None, shrinking=True,tol=0.001, verbose=False)
plt.show()
if __name__ == '__main__':
main() | mit |
kearnsw/Twitt.IR | src/AnnotweetClassifier2.py | 1 | 4819 | from pymongo import MongoClient
from config import MONGO_URI
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.datasets import fetch_20newsgroups
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
import sys
import numpy as np
from random import shuffle
import pathDictionary
import cPickle
charsToRemove = ['"', ':', ')', '(', '[', ']', ',', '!', "?"]
def lexicon_classifier(tweets, concept):
if concept == "humor":
features = ["\\U0001f602", "lol", "lmao", "rofl", "haha", "hahaha",
"welp", "arse", "ass", "\\U0001f606", "Drumpf",
"skeeter", "fun"]
elif concept == "mistrust":
features = ["illegal", "illegals", "immigrants", "mexicans", "prince",
"false", "fake", "scheme", "hoax", "conspiracy",
"monsanto", "depopulation", "gmo" "hype", "bio-warfare",
"fabricating", "fabrication"]
elif concept == "relief":
features = ["test", "fda", "unveils", "uniforms", "zikaproof",
"deal", "approval", "diagnostics", "billboard", "cure"]
predictions = []
for tweet in tweets:
for char in charsToRemove:
tweet = tweet.replace(char, " ")
# Escape non-ASCII characters (e.g. emoji) into separate "\U0001f602"-style
# tokens, then lower-case and split into words.
parsed = tweet.encode("unicode-escape").replace("\\", " \\")
parsed = parsed.lower().split()
for feature in features:
if feature.lower() in parsed:
prediction = 1
break
else:
prediction = 0
predictions.append(prediction)
return predictions
# Import training data
client = MongoClient(MONGO_URI)
db = client.get_default_database()
cursor = db["submissions"].find({})
submissions = []
for document in cursor:
if "tweet" in document:
submissions.append(document)
shuffle(submissions)
clusterMap = pathDictionary.getDictionary()
corpus = []
target = []
raw = []
humor_words = []
query = sys.argv[1]
count = 0
for submission in submissions:
raw.append(submission["tweet"]["text"])
for char in charsToRemove:
submission["tweet"]["text"] = submission["tweet"]["text"].replace(char, " ")
submission["tweet"]["text"] = submission["tweet"]["text"].encode("unicode-escape").replace("\\", " \\").strip().lower()
tokens = []
for token in submission["tweet"]["text"].split(" "):
if token in clusterMap.keys():
tokens.append(clusterMap[token])
else:
tokens.append(token)
submission["tweet"]["text"] = " ".join(tokens)
corpus.append(submission["tweet"]["text"])
if submission["data"][query] == "true":
target.append(1)
count += 1
else:
target.append(0)
print count
print corpus
# Separate test data from the training data
# Training Data is the first 90%, i.e [0, (.9 * size)]
train_corpus = corpus[:int((.9 * len(corpus)))]
train_target = target[:int((.9 * len(corpus)))]
raw_train = raw[:int((.9 * len(corpus)))]
# Test Data is the last 10%, i.e [(.9 * size), size]
test_corpus = corpus[int((.9 * len(corpus))):]
test_target = target[int((.9 * len(corpus))):]
raw_test = raw[int((.9 * len(corpus))):]
# Count features
count_vect = CountVectorizer(ngram_range=(1, 2))
train_counts = count_vect.fit_transform(train_corpus)
with open("count_vect.pk1", "wb") as f:
cPickle.dump(count_vect, f)
# Convert to tfidf
tfidf_transformer = TfidfTransformer()
train_tfidf = tfidf_transformer.fit_transform(train_counts)
with open("tfidf.pk1", "wb") as f:
cPickle.dump(tfidf_transformer, f)
# Train classifier
clf = MultinomialNB().fit(train_tfidf, train_target)
# Transform test data
test_counts = count_vect.transform(test_corpus)
test_tfidf = tfidf_transformer.transform(test_counts)
# Predict test values
if query == "concern":
predicted = clf.predict(test_tfidf)
else:
predicted = lexicon_classifier(raw_test, query)
print predicted
print np.mean(predicted == test_target)
print "--------------------------------------"
report = metrics.classification_report(test_target, predicted,
target_names=['other', query])
for i in range(len(predicted)):
if predicted[i] != test_target[i] and test_target[i] == 0:
print("False Positive: " + raw_test[i])
elif predicted[i] != test_target[i] and test_target[i] == 1:
print("False Negative: " + raw_test[i])
else:
if test_target[i] == 1:
print("Correctly Identified: " + raw_test[i])
print report
with open("concernClassifier.pk1", "wb") as f:
cPickle.dump(clf, f)
"""
features = extract_features(submissions)
unigram_features = features["word_features"]
bigram_features = features["bigram_features"]
"""
| gpl-3.0 |
Ldpe2G/mxnet | example/svm_mnist/svm_mnist.py | 7 | 3545 |
#############################################################
## Please read the README.md document for better reference ##
#############################################################
from __future__ import print_function
import mxnet as mx
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.decomposition import PCA
# import matplotlib.pyplot as plt
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Network declaration as symbols. The following pattern was based
# on the article, but feel free to play with the number of nodes
# and with the activation function
data = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=512)
act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 512)
act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
# Here we add the ultimate layer based on L2-SVM objective
mlp = mx.symbol.SVMOutput(data=fc3, name='svm')
# To use L1-SVM objective, comment the line above and uncomment the line below
# mlp = mx.symbol.SVMOutput(data=fc3, name='svm', use_linear=True)
# Now we fetch the MNIST dataset, add some noise, as the article suggests,
# permute it and assign the examples to be used on our network
mnist = fetch_mldata('MNIST original')
mnist_pca = PCA(n_components=70).fit_transform(mnist.data)
noise = np.random.normal(size=mnist_pca.shape)
mnist_pca += noise
np.random.seed(1234) # set seed for deterministic ordering
p = np.random.permutation(mnist_pca.shape[0])
X = mnist_pca[p]
Y = mnist.target[p]
X_show = mnist.data[p]
# Scale the inputs down by 255 (as the original pixel data would be
# normalized to [0,1]) and separate the train set and test set
X = X.astype(np.float32)/255
X_train = X[:60000]
X_test = X[60000:]
X_show = X_show[60000:]
Y_train = Y[:60000]
Y_test = Y[60000:]
# Article's suggestion on batch size
batch_size = 200
train_iter = mx.io.NDArrayIter(X_train, Y_train, batch_size=batch_size)
test_iter = mx.io.NDArrayIter(X_test, Y_test, batch_size=batch_size)
# A quick workaround to prevent mxnet from complaining about the lack of a softmax_label
train_iter.label = mx.io._init_data(Y_train, allow_empty=True, default_name='svm_label')
test_iter.label = mx.io._init_data(Y_test, allow_empty=True, default_name='svm_label')
# Here we instantiate and fit the model for our data
# The article actually suggests using 400 epochs,
# but I reduced it to 10 for convenience
mod = mx.mod.Module(
context = mx.cpu(0), # Run on CPU 0
symbol = mlp, # Use the network we just defined
label_names = ['svm_label'],
)
mod.fit(
train_data=train_iter,
eval_data=test_iter, # Testing data set. MXNet computes scores on test set every epoch
batch_end_callback = mx.callback.Speedometer(batch_size, 200), # Logging module to print out progress
num_epoch = 10, # Train for 10 epochs
optimizer_params = {
'learning_rate': 0.1, # Learning rate
'momentum': 0.9, # Momentum for SGD with momentum
'wd': 0.00001, # Weight decay for regularization
},
)
# Uncomment to view an example
# plt.imshow((X_show[0].reshape((28,28))*255).astype(np.uint8), cmap='Greys_r')
# plt.show()
# print 'Result:', model.predict(X_test[0:1])[0].argmax()
# Now it prints how good did the network did for this configuration
print('Accuracy:', mod.score(test_iter, mx.metric.Accuracy())[0][1]*100, '%')
| apache-2.0 |
jjardel/probablyPOTUS | model/src/_model.py | 2 | 5837 | # python STL
import re
import os
from datetime import datetime
# third party modules
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve
from sklearn.externals import joblib
import numpy as np
import matplotlib.pyplot as plt
# model utils
from model.src import DFColumnExtractor, twitter_tokenizer
# other utils
from lib.utils.lw import get_logger
from lib.utils.db_conn import DBConn
WORKING_DIR = os.getenv('WORKING_DIR')
CONFIG = '{0}/config/db_creds_local.json'.format(WORKING_DIR)
TEXT_FEATURES = 'text'
NON_TEXT_FEATURES = [
#'favorites',
#'retweets',
#'retweets_to_faves',
'num_exclamation_points',
'num_uppercase_strings',
'is_trump_retweet'
#'is_tweetstorm' # this feature has some leakage of the label
]
LABEL = 'tweet_source'
class BaseModel(object):
def __init__(self, table_name, schema_name, **params_dict):
self.table_name = table_name
self.schema_name = schema_name
self.logger = get_logger(__name__)
self.params = params_dict
self._random_state = 42
self.data = None
self.train_inds_ = None
self.test_inds_ = None
self.model_ = None
self.gs_ = None
@property
def _estimator(self):
raise NotImplementedError('this method must be overriden')
def _get_data(self):
# retrieve data from DB
conn = DBConn(CONFIG)
data = conn.export(self.table_name, schema=self.schema_name)
# zero-one encoding for labels
data.tweet_source = data.tweet_source.apply(lambda x: 1 if x == 'android' else 0)
# standardize all urls
data.text = data.text.str.replace('https?:\/\/t\.co\/[a-zA-Z0-9\-\.]{8,}', 'twitter_url ')
self.data = data
def _get_train_test_split(self, train_size=None):
"""
Perform stratified train/test split. Sets the indices for the train/test sets
"""
idx_array = np.arange(len(self.data))
self.train_inds_, self.test_inds_, _, _ = train_test_split(
idx_array,
self.data[LABEL].values,
train_size=train_size,
stratify=self.data[LABEL].values,
random_state=self._random_state
)
def train(self, train_size=0.8, k_folds=5):
# retrieve data from DB and pre-process
self._get_data()
# perform train/test split
self._get_train_test_split(train_size=train_size)
# define text pre-processing pipeline
text_pipeline = Pipeline([
('extract_text', DFColumnExtractor(TEXT_FEATURES)),
('vect', TfidfVectorizer(tokenizer=twitter_tokenizer))
])
# define pipeline for pre-processing of numeric features
numeric_pipeline = Pipeline([
('extract_nums', DFColumnExtractor(NON_TEXT_FEATURES)),
('scaler', MinMaxScaler())
])
# combine both steps into a single pipeline
pipeline = Pipeline([
('features', FeatureUnion([
('text_processing', text_pipeline),
('num_processing', numeric_pipeline)
])),
('clf', self._estimator)
])
self.logger.info('Fitting model hyperparameters with {0}-fold CV'.format(k_folds))
gs = GridSearchCV(pipeline, self.params, n_jobs=-1, cv=k_folds)
X = self.data.iloc[self.train_inds_, :]
y = self.data[LABEL].values[self.train_inds_]
gs.fit(X, y)
self.logger.info('Validation set accuracy is {0}'.format(gs.best_score_))
self.gs_ = gs
self.model_ = gs.best_estimator_
def evaluate(self):
if not self.model_:
raise AttributeError('No model attribute found. Must run train() method first')
X_test = self.data.iloc[self.test_inds_, :]
y_test = self.data[LABEL].values[self.test_inds_]
y_preds = self.model_.predict(X_test)
self.logger.info('test set accuracy is {0}'.format(accuracy_score(y_test, y_preds)))
# ROC Curve
y_probs = self.model_.predict_proba(X_test)[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, y_probs)
auc = roc_auc_score(y_test, y_probs)
self.logger.info('ROC AUC for the test set: {0}'.format(auc))
plt.plot(fpr, tpr, lw=2, color='r', label='AUC: {:.2}'.format(auc))
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.plot([0, 1], [0, 1], linestyle='--')
plt.legend()
ts = datetime.now().strftime('%Y%m%d_%H%M%S')
plt.savefig('{0}/model/saved_models/roc_{1}.png'.format(WORKING_DIR, ts))
def save(self, filebase):
# re-train best model on full data set
self.model_.fit(self.data, self.data[LABEL].values)
ts = datetime.now().strftime('%Y%m%d_%H%M%S')
# logging wrappers don't serialize
del self.logger
joblib.dump(self, '{0}/model_{1}.pkl'.format(filebase, ts))
class NaiveBayesModel(BaseModel):
@property
def _estimator(self):
return MultinomialNB()
class RandomForestModel(BaseModel):
@property
def _estimator(self):
return RandomForestClassifier()
if __name__ == '__main__':
nb_params = {
'features__text_processing__vect__ngram_range': [(1, 1), (1, 2), (1, 3)],
'clf__alpha': np.logspace(-2, 0, num=10)
}
m = NaiveBayesModel('crazy_tweet_features', 'clean', **nb_params)
m.train()
m.save('.')
print('test') | gpl-3.0 |
matthew-tucker/mne-python | examples/inverse/plot_gamma_map_inverse.py | 30 | 2316 | """
===============================================================================
Compute a sparse inverse solution using the Gamma-Map empirical Bayesian method
===============================================================================
See Wipf et al. "A unified Bayesian framework for MEG/EEG source imaging."
NeuroImage, vol. 44, no. 3, pp. 947-66, Mar. 2009.
"""
# Author: Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne.datasets import sample
from mne.inverse_sparse import gamma_map
from mne.viz import plot_sparse_source_estimates
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
# Read the evoked response and crop it
condition = 'Left visual'
evoked = mne.read_evokeds(evoked_fname, condition=condition,
baseline=(None, 0))
evoked.crop(tmin=-50e-3, tmax=300e-3)
# Read the forward solution
forward = mne.read_forward_solution(fwd_fname, surf_ori=True,
force_fixed=False)
# Read the noise covariance matrix and regularize it
cov = mne.read_cov(cov_fname)
cov = mne.cov.regularize(cov, evoked.info)
# Run the Gamma-MAP method
alpha = 0.5
stc, residual = gamma_map(evoked, forward, cov, alpha, xyz_same_gamma=True,
return_residual=True)
# View in 2D and 3D ("glass" brain like 3D plot)
# Show the sources as spheres scaled by their strength
scale_factors = np.max(np.abs(stc.data), axis=1)
scale_factors = 0.5 * (1 + scale_factors / np.max(scale_factors))
plot_sparse_source_estimates(
forward['src'], stc, bgcolor=(1, 1, 1),
modes=['sphere'], opacity=0.1, scale_factors=(scale_factors, None),
fig_name="Gamma-MAP")
# Show the evoked response and the residual for gradiometers
ylim = dict(grad=[-120, 120])
evoked.pick_types(meg='grad', exclude='bads')
evoked.plot(titles=dict(grad='Evoked Response Gradiometers'), ylim=ylim,
proj=True)
residual.pick_types(meg='grad', exclude='bads')
residual.plot(titles=dict(grad='Residuals Gradiometers'), ylim=ylim,
proj=True)
| bsd-3-clause |
rahuldhote/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 269 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places; these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
mr3bn/DAT210x | Module6/assignment1.py | 1 | 5224 | import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import time
#
# INFO: Your Parameters.
# You can adjust them after completing the lab
C = 1
kernel = 'linear'
iterations = 5000 # TODO: Change to 200000 once you get to Question#2
#
# INFO: You can set this to false if you want to
# draw the full square matrix
FAST_DRAW = True
def drawPlots(model, X_train, X_test, y_train, y_test, wintitle='Figure 1'):
# INFO: A convenience function for you
# You can use this to break any higher-dimensional space down
# And view cross sections of it.
# If this line throws an error, use plt.style.use('ggplot') instead
mpl.style.use('ggplot') # Look Pretty
padding = 3
resolution = 0.5
max_2d_score = 0
y_colors = ['#ff0000', '#00ff00', '#0000ff']
my_cmap = mpl.colors.ListedColormap(['#ffaaaa', '#aaffaa', '#aaaaff'])
colors = [y_colors[i] for i in y_train]
num_columns = len(X_train.columns)
fig = plt.figure()
fig.canvas.set_window_title(wintitle)
cnt = 0
for col in range(num_columns):
for row in range(num_columns):
# Easy out
if FAST_DRAW and col > row:
cnt += 1
continue
ax = plt.subplot(num_columns, num_columns, cnt + 1)
plt.xticks(())
plt.yticks(())
# Intersection:
if col == row:
plt.text(0.5, 0.5, X_train.columns[row], verticalalignment='center', horizontalalignment='center', fontsize=12)
cnt += 1
continue
# Only select two features to display, then train the model
X_train_bag = X_train.ix[:, [row,col]]
X_test_bag = X_test.ix[:, [row,col]]
model.fit(X_train_bag, y_train)
# Create a mesh to plot in
x_min, x_max = X_train_bag.ix[:, 0].min() - padding, X_train_bag.ix[:, 0].max() + padding
y_min, y_max = X_train_bag.ix[:, 1].min() - padding, X_train_bag.ix[:, 1].max() + padding
xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution),
np.arange(y_min, y_max, resolution))
# Plot Boundaries
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# Prepare the contour
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=my_cmap, alpha=0.8)
plt.scatter(X_train_bag.ix[:, 0], X_train_bag.ix[:, 1], c=colors, alpha=0.5)
score = round(model.score(X_test_bag, y_test) * 100, 3)
plt.text(0.5, 0, "Score: {0}".format(score), transform = ax.transAxes, horizontalalignment='center', fontsize=8)
max_2d_score = score if score > max_2d_score else max_2d_score
cnt += 1
print "Max 2D Score: ", max_2d_score
fig.set_tight_layout(True)
def benchmark(model, X_train, X_test, y_train, y_test, wintitle='Figure 1'):
print '\n\n' + wintitle + ' Results'
s = time.time()
for i in range(iterations):
#
# TODO: train the classifier on the training data / labels:
#
model.fit(X_train, y_train)
print "{0} Iterations Training Time: ".format(iterations), time.time() - s
s = time.time()
for i in range(iterations):
#
# TODO: score the classifier on the testing data / labels:
#
score = model.score(X_test, y_test)
print "{0} Iterations Scoring Time: ".format(iterations), time.time() - s
print "High-Dimensionality Score: ", round((score*100), 3)
#
# TODO: Load up the wheat dataset into dataframe 'X'
# Verify you did it properly.
# Indices shouldn't be doubled, nor weird headers...
X = pd.read_csv('Datasets/wheat.data', index_col=0, header=0)
# INFO: An easy way to show which rows have nans in them
print X[pd.isnull(X).any(axis=1)]
#
# TODO: Go ahead and drop any row with a nan
#
X = X.dropna()
#
# INFO: In the future, you might try setting the NaN values to the
# mean value of that column; the mean should only be calculated for
# the specific class rather than across all classes, now that you
# have the labels
#
# TODO: Copy the labels out of the dataset into variable 'y', then remove
# them from X. Encode the labels, using the .map() trick we showed
# you in Module 5 -- canadian:0, kama:1, and rosa:2
#
y = X['wheat_type'].map({'canadian' : 0, 'kama' : 1, 'rosa' : 2})
del X['wheat_type']
#
# TODO: Split your data into test / train sets
# Your test size can be 30% with random_state 7.
# Use variable names: X_train, X_test, y_train, y_test
#
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 7)
#
# TODO: Create an SVC classifier named svc
# Use a linear kernel, and set the C value to C
#
from sklearn.svm import SVC
svc = SVC(kernel='linear', C=C)
#
# TODO: Create an KNeighbors classifier named knn
# Set the neighbor count to 5
#
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
benchmark(knn, X_train, X_test, y_train, y_test, 'KNeighbors')
drawPlots(knn, X_train, X_test, y_train, y_test, 'KNeighbors')
benchmark(svc, X_train, X_test, y_train, y_test, 'SVC')
drawPlots(svc, X_train, X_test, y_train, y_test, 'SVC')
plt.show()
#
# BONUS: After submitting your answers, toy around with
# gamma, kernel, and C.
| mit |
uber/pyro | examples/baseball.py | 1 | 16326 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import argparse
import logging
import math
import pandas as pd
import torch
import pyro
from pyro.distributions import Beta, Binomial, HalfCauchy, Normal, Pareto, Uniform
from pyro.distributions.util import scalar_like
from pyro.infer import MCMC, NUTS, Predictive
from pyro.infer.mcmc.util import initialize_model, summary
from pyro.util import ignore_experimental_warning
"""
Example has been adapted from [1]. It demonstrates how to do Bayesian inference using
NUTS (or, HMC) in Pyro, and use of some common inference utilities.
As in the Stan tutorial, this uses the small baseball dataset of Efron and Morris [2]
to estimate players' batting average which is the fraction of times a player got a
base hit out of the number of times they went up at bat.
The dataset separates the initial 45 at-bats statistics from the remaining season.
We use the hits data from the initial 45 at-bats to estimate the batting average
for each player. We then use the remaining season's data to validate the predictions
from our models.
Three models are evaluated:
- Complete pooling model: The success probability of scoring a hit is shared
amongst all players.
- No pooling model: Each individual player's success probability is distinct and
there is no data sharing amongst players.
- Partial pooling model: A hierarchical model with partial data sharing.
We recommend Radford Neal's tutorial on HMC ([3]) to users who would like to get a
more comprehensive understanding of HMC and its variants, and to [4] for details on
the No U-Turn Sampler, which provides an efficient and automated way (i.e. limited
hyper-parameters) of running HMC on different problems.
[1] Carpenter B. (2016), ["Hierarchical Partial Pooling for Repeated Binary Trials"]
(http://mc-stan.org/users/documentation/case-studies/pool-binary-trials.html).
[2] Efron B., Morris C. (1975), "Data analysis using Stein's estimator and its
generalizations", J. Amer. Statist. Assoc., 70, 311-319.
[3] Neal, R. (2012), "MCMC using Hamiltonian Dynamics",
(https://arxiv.org/pdf/1206.1901.pdf)
[4] Hoffman, M. D. and Gelman, A. (2014), "The No-U-turn sampler: Adaptively setting
path lengths in Hamiltonian Monte Carlo", (https://arxiv.org/abs/1111.4246)
"""
logging.basicConfig(format="%(message)s", level=logging.INFO)
DATA_URL = "https://d2hg8soec8ck9v.cloudfront.net/datasets/EfronMorrisBB.txt"
# ===================================
# MODELS
# ===================================
def fully_pooled(at_bats, hits):
r"""
Number of hits in $K$ at bats for each player has a Binomial
distribution with a common probability of success, $\phi$.
:param (torch.Tensor) at_bats: Number of at bats for each player.
:param (torch.Tensor) hits: Number of hits for the given at bats.
:return: Number of hits predicted by the model.
"""
phi_prior = Uniform(scalar_like(at_bats, 0), scalar_like(at_bats, 1))
phi = pyro.sample("phi", phi_prior)
num_players = at_bats.shape[0]
with pyro.plate("num_players", num_players):
return pyro.sample("obs", Binomial(at_bats, phi), obs=hits)
def not_pooled(at_bats, hits):
r"""
Number of hits in $K$ at bats for each player has a Binomial
distribution with independent probability of success, $\phi_i$.
:param (torch.Tensor) at_bats: Number of at bats for each player.
:param (torch.Tensor) hits: Number of hits for the given at bats.
:return: Number of hits predicted by the model.
"""
num_players = at_bats.shape[0]
with pyro.plate("num_players", num_players):
phi_prior = Uniform(scalar_like(at_bats, 0), scalar_like(at_bats, 1))
phi = pyro.sample("phi", phi_prior)
return pyro.sample("obs", Binomial(at_bats, phi), obs=hits)
def partially_pooled(at_bats, hits):
r"""
Number of hits has a Binomial distribution with independent
probability of success, $\phi_i$. Each $\phi_i$ follows a Beta
distribution with concentration parameters $c_1$ and $c_2$, where
$c_1 = m * kappa$, $c_2 = (1 - m) * kappa$, $m ~ Uniform(0, 1)$,
and $kappa ~ Pareto(1, 1.5)$.
:param (torch.Tensor) at_bats: Number of at bats for each player.
:param (torch.Tensor) hits: Number of hits for the given at bats.
:return: Number of hits predicted by the model.
"""
num_players = at_bats.shape[0]
m = pyro.sample("m", Uniform(scalar_like(at_bats, 0), scalar_like(at_bats, 1)))
kappa = pyro.sample(
"kappa", Pareto(scalar_like(at_bats, 1), scalar_like(at_bats, 1.5))
)
with pyro.plate("num_players", num_players):
phi_prior = Beta(m * kappa, (1 - m) * kappa)
phi = pyro.sample("phi", phi_prior)
return pyro.sample("obs", Binomial(at_bats, phi), obs=hits)
def partially_pooled_with_logit(at_bats, hits):
r"""
Number of hits has a Binomial distribution with a logit link function.
The logits $\alpha$ for each player is normally distributed with the
mean and scale parameters sharing a common prior.
:param (torch.Tensor) at_bats: Number of at bats for each player.
:param (torch.Tensor) hits: Number of hits for the given at bats.
:return: Number of hits predicted by the model.
"""
num_players = at_bats.shape[0]
loc = pyro.sample("loc", Normal(scalar_like(at_bats, -1), scalar_like(at_bats, 1)))
scale = pyro.sample("scale", HalfCauchy(scale=scalar_like(at_bats, 1)))
with pyro.plate("num_players", num_players):
alpha = pyro.sample("alpha", Normal(loc, scale))
return pyro.sample("obs", Binomial(at_bats, logits=alpha), obs=hits)
# ===================================
# DATA SUMMARIZE UTILS
# ===================================
def get_summary_table(
posterior,
sites,
player_names,
transforms={},
diagnostics=False,
group_by_chain=False,
):
"""
Return summarized statistics for each of the ``sites`` in the
traces corresponding to the approximate posterior.
"""
site_stats = {}
for site_name in sites:
marginal_site = posterior[site_name].cpu()
if site_name in transforms:
marginal_site = transforms[site_name](marginal_site)
site_summary = summary(
{site_name: marginal_site}, prob=0.5, group_by_chain=group_by_chain
)[site_name]
if site_summary["mean"].shape:
site_df = pd.DataFrame(site_summary, index=player_names)
else:
site_df = pd.DataFrame(site_summary, index=[0])
if not diagnostics:
site_df = site_df.drop(["n_eff", "r_hat"], axis=1)
site_stats[site_name] = site_df.astype(float).round(2)
return site_stats
def train_test_split(pd_dataframe):
"""
Training data - 45 initial at-bats and hits for each player.
Validation data - Full season at-bats and hits for each player.
"""
device = torch.Tensor().device
train_data = torch.tensor(
pd_dataframe[["At-Bats", "Hits"]].values, dtype=torch.float, device=device
)
test_data = torch.tensor(
pd_dataframe[["SeasonAt-Bats", "SeasonHits"]].values,
dtype=torch.float,
device=device,
)
first_name = pd_dataframe["FirstName"].values
last_name = pd_dataframe["LastName"].values
player_names = [
" ".join([first, last]) for first, last in zip(first_name, last_name)
]
return train_data, test_data, player_names
# ===================================
# MODEL EVALUATION UTILS
# ===================================
def sample_posterior_predictive(model, posterior_samples, baseball_dataset):
"""
Generate samples from posterior predictive distribution.
"""
train, test, player_names = train_test_split(baseball_dataset)
at_bats = train[:, 0]
at_bats_season = test[:, 0]
logging.Formatter("%(message)s")
logging.info("\nPosterior Predictive:")
logging.info("Hit Rate - Initial 45 At Bats")
logging.info("-----------------------------")
# set hits=None to convert it from observation node to sample node
train_predict = Predictive(model, posterior_samples)(at_bats, None)
train_summary = get_summary_table(
train_predict, sites=["obs"], player_names=player_names
)["obs"]
train_summary = train_summary.assign(ActualHits=baseball_dataset[["Hits"]].values)
logging.info(train_summary)
logging.info("\nHit Rate - Season Predictions")
logging.info("-----------------------------")
with ignore_experimental_warning():
test_predict = Predictive(model, posterior_samples)(at_bats_season, None)
test_summary = get_summary_table(
test_predict, sites=["obs"], player_names=player_names
)["obs"]
test_summary = test_summary.assign(
ActualHits=baseball_dataset[["SeasonHits"]].values
)
logging.info(test_summary)
def evaluate_pointwise_pred_density(model, posterior_samples, baseball_dataset):
"""
Evaluate the log probability density of observing the unseen data (season hits)
given a model and posterior distribution over the parameters.
"""
_, test, player_names = train_test_split(baseball_dataset)
at_bats_season, hits_season = test[:, 0], test[:, 1]
trace = Predictive(model, posterior_samples).get_vectorized_trace(
at_bats_season, hits_season
)
# Use LogSumExp trick to evaluate $log(1/num_samples \sum_i p(new_data | \theta^{i})) $,
# where $\theta^{i}$ are parameter samples from the model's posterior.
trace.compute_log_prob()
post_loglik = trace.nodes["obs"]["log_prob"]
# computes expected log predictive density at each data point
exp_log_density = (post_loglik.logsumexp(0) - math.log(post_loglik.shape[0])).sum()
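# i.e. log(1/S * sum_i exp(l_i)) = logsumexp(l) - log(S), applied along the
# sample dimension and then summed over the data points.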
logging.info("\nLog pointwise predictive density")
logging.info("--------------------------------")
logging.info("{:.4f}\n".format(exp_log_density))
def main(args):
baseball_dataset = pd.read_csv(DATA_URL, sep="\t")
train, _, player_names = train_test_split(baseball_dataset)
at_bats, hits = train[:, 0], train[:, 1]
logging.info("Original Dataset:")
logging.info(baseball_dataset)
# (1) Full Pooling Model
# In this model, we illustrate how to use MCMC with general potential_fn.
init_params, potential_fn, transforms, _ = initialize_model(
fully_pooled,
model_args=(at_bats, hits),
num_chains=args.num_chains,
jit_compile=args.jit,
skip_jit_warnings=True,
)
nuts_kernel = NUTS(potential_fn=potential_fn)
mcmc = MCMC(
nuts_kernel,
num_samples=args.num_samples,
warmup_steps=args.warmup_steps,
num_chains=args.num_chains,
initial_params=init_params,
transforms=transforms,
)
mcmc.run(at_bats, hits)
samples_fully_pooled = mcmc.get_samples()
logging.info("\nModel: Fully Pooled")
logging.info("===================")
logging.info("\nphi:")
logging.info(
get_summary_table(
mcmc.get_samples(group_by_chain=True),
sites=["phi"],
player_names=player_names,
diagnostics=True,
group_by_chain=True,
)["phi"]
)
num_divergences = sum(map(len, mcmc.diagnostics()["divergences"].values()))
logging.info("\nNumber of divergent transitions: {}\n".format(num_divergences))
sample_posterior_predictive(fully_pooled, samples_fully_pooled, baseball_dataset)
evaluate_pointwise_pred_density(
fully_pooled, samples_fully_pooled, baseball_dataset
)
# (2) No Pooling Model
nuts_kernel = NUTS(not_pooled, jit_compile=args.jit, ignore_jit_warnings=True)
mcmc = MCMC(
nuts_kernel,
num_samples=args.num_samples,
warmup_steps=args.warmup_steps,
num_chains=args.num_chains,
)
mcmc.run(at_bats, hits)
samples_not_pooled = mcmc.get_samples()
logging.info("\nModel: Not Pooled")
logging.info("=================")
logging.info("\nphi:")
logging.info(
get_summary_table(
mcmc.get_samples(group_by_chain=True),
sites=["phi"],
player_names=player_names,
diagnostics=True,
group_by_chain=True,
)["phi"]
)
num_divergences = sum(map(len, mcmc.diagnostics()["divergences"].values()))
logging.info("\nNumber of divergent transitions: {}\n".format(num_divergences))
sample_posterior_predictive(not_pooled, samples_not_pooled, baseball_dataset)
evaluate_pointwise_pred_density(not_pooled, samples_not_pooled, baseball_dataset)
# (3) Partially Pooled Model
nuts_kernel = NUTS(partially_pooled, jit_compile=args.jit, ignore_jit_warnings=True)
mcmc = MCMC(
nuts_kernel,
num_samples=args.num_samples,
warmup_steps=args.warmup_steps,
num_chains=args.num_chains,
)
mcmc.run(at_bats, hits)
samples_partially_pooled = mcmc.get_samples()
logging.info("\nModel: Partially Pooled")
logging.info("=======================")
logging.info("\nphi:")
logging.info(
get_summary_table(
mcmc.get_samples(group_by_chain=True),
sites=["phi"],
player_names=player_names,
diagnostics=True,
group_by_chain=True,
)["phi"]
)
num_divergences = sum(map(len, mcmc.diagnostics()["divergences"].values()))
logging.info("\nNumber of divergent transitions: {}\n".format(num_divergences))
sample_posterior_predictive(
partially_pooled, samples_partially_pooled, baseball_dataset
)
evaluate_pointwise_pred_density(
partially_pooled, samples_partially_pooled, baseball_dataset
)
# (4) Partially Pooled with Logit Model
nuts_kernel = NUTS(
partially_pooled_with_logit, jit_compile=args.jit, ignore_jit_warnings=True
)
mcmc = MCMC(
nuts_kernel,
num_samples=args.num_samples,
warmup_steps=args.warmup_steps,
num_chains=args.num_chains,
)
mcmc.run(at_bats, hits)
samples_partially_pooled_logit = mcmc.get_samples()
logging.info("\nModel: Partially Pooled with Logit")
logging.info("==================================")
logging.info("\nSigmoid(alpha):")
logging.info(
get_summary_table(
mcmc.get_samples(group_by_chain=True),
sites=["alpha"],
player_names=player_names,
transforms={"alpha": torch.sigmoid},
diagnostics=True,
group_by_chain=True,
)["alpha"]
)
num_divergences = sum(map(len, mcmc.diagnostics()["divergences"].values()))
logging.info("\nNumber of divergent transitions: {}\n".format(num_divergences))
sample_posterior_predictive(
partially_pooled_with_logit, samples_partially_pooled_logit, baseball_dataset
)
evaluate_pointwise_pred_density(
partially_pooled_with_logit, samples_partially_pooled_logit, baseball_dataset
)
if __name__ == "__main__":
assert pyro.__version__.startswith("1.7.0")
parser = argparse.ArgumentParser(description="Baseball batting average using HMC")
parser.add_argument("-n", "--num-samples", nargs="?", default=200, type=int)
parser.add_argument("--num-chains", nargs="?", default=4, type=int)
parser.add_argument("--warmup-steps", nargs="?", default=100, type=int)
parser.add_argument("--rng_seed", nargs="?", default=0, type=int)
parser.add_argument(
"--jit", action="store_true", default=False, help="use PyTorch jit"
)
parser.add_argument(
"--cuda", action="store_true", default=False, help="run this example in GPU"
)
args = parser.parse_args()
# work around the error "CUDA error: initialization error"
# see https://github.com/pytorch/pytorch/issues/2517
torch.multiprocessing.set_start_method("spawn")
pyro.set_rng_seed(args.rng_seed)
# Enable validation checks
# work around with the error "RuntimeError: received 0 items of ancdata"
# see https://discuss.pytorch.org/t/received-0-items-of-ancdata-pytorch-0-4-0/19823
torch.multiprocessing.set_sharing_strategy("file_system")
if args.cuda:
torch.set_default_tensor_type(torch.cuda.FloatTensor)
main(args)
| apache-2.0 |
davidkunio/dedupe | tests/canonical_test.py | 3 | 3235 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
from future.utils import viewitems
from builtins import range
from itertools import combinations
import csv
import exampleIO
import dedupe
import os
import time
import optparse
import logging
optp = optparse.OptionParser()
optp.add_option('-v', '--verbose', dest='verbose', action='count',
help='Increase verbosity (specify multiple times for more)'
)
(opts, args) = optp.parse_args()
log_level = logging.WARNING
if opts.verbose is not None :
if opts.verbose == 1:
log_level = logging.INFO
elif opts.verbose >= 2:
log_level = logging.DEBUG
logging.getLogger().setLevel(log_level)
#logging.basicConfig(level=log_level)
#import random
#import sys
#random.seed(365072799328404092)
def canonicalImport(filename):
preProcess = exampleIO.preProcess
data_d = {}
with open(filename) as f:
reader = csv.DictReader(f)
for (i, row) in enumerate(reader):
clean_row = [(k, preProcess(v)) for (k, v) in
viewitems(row)]
data_d[i] = dedupe.core.frozendict(clean_row)
return data_d, reader.fieldnames
def evaluateDuplicates(found_dupes, true_dupes):
true_positives = found_dupes.intersection(true_dupes)
false_positives = found_dupes.difference(true_dupes)
uncovered_dupes = true_dupes.difference(found_dupes)
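# precision = |found & true| / |found|   (printed below as 1 - FP / |found|)
# recall    = |found & true| / |true|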
print('found duplicate')
print(len(found_dupes))
print('precision')
print(1 - len(false_positives) / float(len(found_dupes)))
print('recall')
print(len(true_positives) / float(len(true_dupes)))
settings_file = 'canonical_learned_settings.json'
raw_data = 'tests/datasets/restaurant-nophone-training.csv'
data_d, header = canonicalImport(raw_data)
training_pairs = dedupe.trainingDataDedupe(data_d,
'unique_id',
5000)
duplicates_s = set(frozenset(pair) for pair in training_pairs['match'])
t0 = time.time()
print('number of known duplicate pairs', len(duplicates_s))
if os.path.exists(settings_file):
with open(settings_file, 'rb') as f:
deduper = dedupe.StaticDedupe(f, 1)
else:
fields = [{'field' : 'name', 'type': 'String'},
{'field' : 'name', 'type': 'Exact'},
{'field' : 'address', 'type': 'String'},
{'field' : 'cuisine', 'type': 'ShortString'},
{'field' : 'city', 'type' : 'ShortString'}
]
deduper = dedupe.Dedupe(fields, num_cores=5)
deduper.sample(data_d, 10000)
deduper.markPairs(training_pairs)
deduper.train()
with open(settings_file, 'wb') as f:
deduper.writeSettings(f)
alpha = deduper.threshold(data_d, 1.5)
# print candidates
print('clustering...')
clustered_dupes = deduper.match(data_d, threshold=alpha)
print('Evaluate Clustering')
confirm_dupes = set([])
for dupes, score in clustered_dupes:
for pair in combinations(dupes, 2):
confirm_dupes.add(frozenset((data_d[pair[0]],
data_d[pair[1]])))
evaluateDuplicates(confirm_dupes, duplicates_s)
print('ran in ', time.time() - t0, 'seconds')
| mit |
jpn--/larch | larch/prelearning.py | 1 | 11273 |
import logging
import numpy
import pandas
import os
from appdirs import user_cache_dir
import joblib
from typing import MutableMapping
from .general_precision import l4_float_dtype
from .log import logger_name
from .dataframes import DataFrames
def user_cache_file(filename, appname=None, appauthor=None, version=None, opinion=True):
d = user_cache_dir(appname=appname, appauthor=appauthor, version=version, opinion=opinion)
os.makedirs(d, exist_ok=True)
return os.path.join(d, filename)
class Prelearner():
"""
A prelearner for use with Larch.
A prelearner uses a machine learning classifier to make an initial
prediction of the result. This initial prediction is then added
as an input data column for Larch, effectively creating a chained
classifier.
Parameters
----------
dataframes : larch.DataFrames
The data used to train the prelearner, including the observed
choices and, if given, the weights.
ca_columns : list, optional
The idca-format exogenous variables used as classifier inputs.
co_columns : list, optional
The idco-format exogenous variables used as classifier inputs.
classifier : sklearn Classifier or Regressor
This is the class object for the selected classifier, not
an existing instance. This classifier or regressor will be
instantiated and trained using the data above to generate
the prediction.
fit : dict or False, optional
A dictionary of arguments to pass to the `fit` method of the
classifier, or set to False to not fit the classifier
during the initialization of this object.
cache_file : str, optional
A cache file name to store the trained prelearner. If just a filename is given,
it will be stored in `appdirs.user_cache_file()`. If instead an absolute path or
a relative path beginning with '.' is given, that location will be used.
If the file exists, it will be loaded instead of re-training.
output_name : str, default 'prelearned_utility'
The name of the output column from this prelearner.
grid_cv_params : dict or List[dict], optional
If given, this is used as the `param_grid` argument
to initialize a :class:`sklearn.model_selection.GridSearchCV`
wrapped around the classifier, instead of using the
classifier directly.
grid_cv_kwds : dict, optional
If `grid_cv_params` is given, this dict gives other keyword
arguments given to :class:`sklearn.model_selection.GridSearchCV`.
**kwargs
Any other keyword arguments are passed through to the classifier's
constructor.
"""
def __init__(
self,
dataframes,
ca_columns=None,
co_columns=None,
classifier=None,
fit=True,
cache_file=None,
output_name='prelearned_utility',
appname='larch',
grid_cv_params=None,
grid_cv_kwds=None,
validation_dataframes=None,
**kwargs,
):
if classifier is None:
raise ValueError('must give a classifier')
if fit is True:
fit = {}
logger = logging.getLogger(logger_name)
self.input_ca_columns = ca_columns if ca_columns is not None else []
self.input_co_columns = co_columns
self.eval_set_names = fit.pop('eval_set_names', []) if isinstance(fit, MutableMapping) else []
if isinstance(fit, MutableMapping):
if 'validation_percent' in fit and validation_dataframes is None:
vpct = fit.pop('validation_percent')
dataframes, validation_dataframes = dataframes.split([100-vpct, vpct])
else:
vpct = 'preset'
if validation_dataframes is not None:
validation_X = self.filter_and_join_columns(
validation_dataframes.data_ca_as_ce(),
validation_dataframes.data_co,
)
validation_Y = validation_dataframes.array_ch_as_ce()
validation_W = validation_dataframes.array_wt_as_ce()
fit['eval_set'] = fit.get('eval_set', []) + [(validation_X, validation_Y)]
if validation_W is not None:
fit['sample_weight_eval_set'] = fit.get('sample_weight_eval_set', []) + [validation_W]
self.eval_set_names += [f'validation_{vpct}']
training_X = self.filter_and_join_columns(
dataframes.data_ca_as_ce(),
dataframes.data_co,
)
training_Y = dataframes.array_ch_as_ce()
training_W = dataframes.array_wt_as_ce()
self.output_column = output_name
if cache_file is not None:
if os.path.isabs(cache_file) or cache_file[:2] in ('./', '..', '.\\'):
cache_clf_file = cache_file
else:
cache_clf_file = user_cache_file(cache_file, appname=appname)
else:
cache_clf_file = None
if cache_clf_file is not None and os.path.exists(cache_clf_file):
logger.info(f'LOADING {cache_clf_file}...')
clf = joblib.load(cache_clf_file)
logger.info(f'COMPLETED LOADING {cache_clf_file}')
else:
if grid_cv_params is not None:
from sklearn.model_selection import GridSearchCV
clf = GridSearchCV(
classifier(**kwargs),
grid_cv_params,
**grid_cv_kwds,
)
else:
clf = classifier(**kwargs)
if fit is not False:
if 'train_as_eval' in fit:
fit.pop('train_as_eval')
fit['eval_set'] = [(training_X, training_Y),] + fit.get('eval_set',[])
if training_W is not None:
fit['sample_weight_eval_set'] = [training_W,]+fit.get('sample_weight_eval_set',[])
self.eval_set_names = ['training'] + self.eval_set_names
logger.info(f'FITTING {classifier}...')
if training_W is not None:
clf.fit(training_X, training_Y, sample_weight=training_W, **fit)
else:
clf.fit(training_X, training_Y, **fit)
logger.info(f'FITTED {classifier}')
if cache_clf_file is not None:
joblib.dump(clf, cache_clf_file)
logger.info(f'SAVED {cache_clf_file}')
self.clf = clf
self._predict_type = 'predict_proba col 1'
def filter_ca_columns(self, X):
# filter the columns of the input into the correct form for the prelearner.
try:
X1 = X[self.input_ca_columns]
except KeyError:
X1 = pandas.DataFrame(
X.eval(self.input_ca_columns).T.astype(float),
index=X.index,
columns=self.input_ca_columns,
)
return X1
def filter_and_join_columns(self, X_ca, X_co):
training_X = self.filter_ca_columns(X_ca)
if self.input_co_columns:
try:
X_co = X_co[self.input_co_columns]
except KeyError:
X_co = pandas.DataFrame(
X_co.eval(self.input_co_columns).T.astype(float),
index=X_co.index,
columns=self.input_co_columns,
)
# Broadcast the case-level (idco) variables onto the case-alternative
# rows, matching on the case id level of the index; unmatched rows get 0.
training_X = training_X.join(X_co, on=training_X.index.levels[0].name, how='left').fillna(0)
return training_X
def apply(
self,
X,
dtype=None,
output_name=None,
**kwargs,
):
"""
Apply the prelearner to compute pseudo-utility.
Parameters
----------
X : pandas.DataFrame
dtype : dtype, default float
The dtype to use for the output column.
output_name : str, optional
The name of the output column from this
application of the prelearner.
**kwargs
Other keyword arguments are forwarded to the
`predict` or `predict_proba` method of the
`clf` member.
Returns
-------
pandas.DataFrame
"""
if dtype is None:
dtype = l4_float_dtype
if isinstance(X, DataFrames):
X_ca = X._data_ca_or_ce
X_co = X.data_co
else:
X_ca = X
X_co = None
X_in = self.filter_and_join_columns(
X_ca,
X_co,
)
if output_name is None:
output_name = self.output_column
if self._predict_type == 'predict_proba col 1':
X_ca.loc[:,output_name] = numpy.log(self.clf.predict_proba(X_in, **kwargs)[:, 1]).astype(dtype)
elif self._predict_type == 'predict':
X_ca.loc[:,output_name] = numpy.log(self.clf.predict(X_in, **kwargs)).astype(dtype)
else:
raise TypeError(self._predict_type)
return X
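# A minimal usage sketch (hypothetical column names; any sklearn-style
# classifier class can be passed as `classifier`):
#
#     from xgboost import XGBClassifier
#     pre = Prelearner(dfs, ca_columns=['tottime', 'totcost'],
#                      classifier=XGBClassifier)
#     dfs = pre.apply(dfs)  # adds a 'prelearned_utility' column to the idca data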
class RandomForestPrelearner(Prelearner):
def __init__(
self,
dataframes,
ca_columns=None,
co_columns=None,
cache_file=None,
fit=True,
output_name='prelearned_utility',
**kwargs,
):
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
default_kwargs = dict(
n_estimators=200,
warm_start=False,
max_features=None,
oob_score=True,
n_jobs=-1,
random_state=123,
)
default_kwargs.update(kwargs)
super().__init__(
dataframes=dataframes,
ca_columns=ca_columns,
co_columns=co_columns,
classifier=RandomForestClassifier,
fit=fit,
cache_file=cache_file,
output_name=output_name,
**default_kwargs,
)
class XGBoostHardPrelearner(Prelearner):
def __init__(
self,
dataframes,
ca_columns=None,
co_columns=None,
cache_file=None,
fit=True,
output_name='prelearned_utility',
**kwargs,
):
from xgboost import XGBClassifier
default_kwargs = dict(
max_depth=11,
learning_rate=0.01,
n_estimators=500,
# silent=True,
objective='binary:logistic',
booster='gbtree',
n_jobs=-1,
max_delta_step=0,
subsample=1,
colsample_bytree=1,
colsample_bylevel=1,
reg_alpha=0,
reg_lambda=1,
scale_pos_weight=1,
base_score=0.5,
random_state=123,
)
default_kwargs.update(kwargs)
super().__init__(
dataframes=dataframes,
ca_columns=ca_columns,
co_columns=co_columns,
classifier=XGBClassifier,
fit=fit,
cache_file=cache_file,
output_name=output_name,
**default_kwargs,
)
class XGBoostSoftPrelearner(Prelearner):
def __init__(
self,
dataframes,
ca_columns=None,
co_columns=None,
cache_file=None,
fit=True,
output_name='prelearned_utility',
**kwargs,
):
from xgboost import XGBRegressor
default_kwargs = dict(
max_depth=11,
learning_rate=0.01,
n_estimators=500,
# silent=True,
objective='reg:logistic',
booster='gbtree',
n_jobs=-1,
max_delta_step=0,
subsample=1,
colsample_bytree=1,
colsample_bylevel=1,
reg_alpha=0,
reg_lambda=1,
scale_pos_weight=1,
base_score=0.5,
random_state=123,
)
default_kwargs.update(kwargs)
super().__init__(
dataframes=dataframes,
ca_columns=ca_columns,
co_columns=co_columns,
classifier=XGBRegressor,
fit=fit,
cache_file=cache_file,
output_name=output_name,
**default_kwargs,
)
self._predict_type = 'predict'
class XGBoostPrelearner(Prelearner):
def __init__(
self,
dataframes,
ca_columns=None,
co_columns=None,
cache_file=None,
fit=True,
output_name='prelearned_utility',
**kwargs,
):
from xgboost import XGBRegressor, XGBClassifier
training_Y = dataframes.array_ch_as_ce()
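        # Choose the objective from the data: a soft (regression) objective
        # when targets are not strictly binary 0/1, a hard classifier otherwise.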
use_soft = numpy.any((training_Y != 0) & (training_Y != 1.0))
default_kwargs = dict(
max_depth=11,
learning_rate=0.01,
n_estimators=500,
# silent=True,
objective='reg:logistic' if use_soft else 'binary:logistic',
booster='gbtree',
n_jobs=-1,
max_delta_step=0,
subsample=1,
colsample_bytree=1,
colsample_bylevel=1,
reg_alpha=0,
reg_lambda=1,
scale_pos_weight=1,
base_score=0.5,
random_state=123,
)
default_kwargs.update(kwargs)
super().__init__(
dataframes=dataframes,
ca_columns=ca_columns,
co_columns=co_columns,
classifier=XGBRegressor if use_soft else XGBClassifier,
fit=fit,
cache_file=cache_file,
output_name=output_name,
**default_kwargs,
)
self._predict_type = 'predict' if use_soft else 'predict_proba col 1'
def evals_result(self):
j = [
pandas.DataFrame({mk:numpy.asarray(mv) for mk, mv in ev.items()})
for ek, ev in self.clf.evals_result_.items()
]
k = [
ek
for ek, ev in self.clf.evals_result_.items()
]
for i in range(len(self.eval_set_names)):
if len(k)>i:
k[i] = self.eval_set_names[i]
return pandas.concat(j, axis=1, keys=k, sort=False)
| gpl-3.0 |
uber/pyro | pyro/distributions/transforms/planar.py | 1 | 8881 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Transform, constraints
from pyro.nn import DenseNN
from ..conditional import ConditionalTransformModule
from ..torch_transform import TransformModule
from ..util import copy_docs_from
@copy_docs_from(Transform)
class ConditionedPlanar(Transform):
domain = constraints.real_vector
codomain = constraints.real_vector
bijective = True
def __init__(self, params):
super().__init__(cache_size=1)
self._params = params
self._cached_logDetJ = None
    # This method ensures that torch.dot(u_hat, w) > -1, required for invertibility
def u_hat(self, u, w):
alpha = torch.matmul(u.unsqueeze(-2), w.unsqueeze(-1)).squeeze(-1)
a_prime = -1 + F.softplus(alpha)
return u + (a_prime - alpha) * w.div(w.pow(2).sum(dim=-1, keepdim=True))
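    # Sanity check of the construction above (explanatory note, not from the
    # original source): w.dot(u_hat(u, w)) = alpha + (a_prime - alpha) = a_prime
    # = -1 + softplus(alpha) > -1, so the invertibility condition holds for any
    # choice of u and w.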
def _call(self, x):
"""
:param x: the input into the bijection
:type x: torch.Tensor
Invokes the bijection x => y; in the prototypical context of a
:class:`~pyro.distributions.TransformedDistribution` `x` is a sample from
the base distribution (or the output of a previous transform)
"""
bias, u, w = self._params() if callable(self._params) else self._params
# x ~ (batch_size, dim_size, 1)
# w ~ (batch_size, 1, dim_size)
# bias ~ (batch_size, 1)
act = torch.tanh(
torch.matmul(w.unsqueeze(-2), x.unsqueeze(-1)).squeeze(-1) + bias
)
u_hat = self.u_hat(u, w)
y = x + u_hat * act
psi_z = (1.0 - act.pow(2)) * w
self._cached_logDetJ = torch.log(
torch.abs(
1
+ torch.matmul(psi_z.unsqueeze(-2), u_hat.unsqueeze(-1))
.squeeze(-1)
.squeeze(-1)
)
)
return y
def _inverse(self, y):
"""
:param y: the output of the bijection
:type y: torch.Tensor
Inverts y => x. As noted above, this implementation is incapable of
inverting arbitrary values `y`; rather it assumes `y` is the result of a
previously computed application of the bijector to some `x` (which was
cached on the forward call)
"""
raise KeyError(
"ConditionedPlanar object expected to find key in intermediates cache but didn't"
)
def log_abs_det_jacobian(self, x, y):
"""
        Calculates the elementwise log of the absolute determinant of the Jacobian
"""
x_old, y_old = self._cached_x_y
if x is not x_old or y is not y_old:
# This call to the parent class Transform will update the cache
# as well as calling self._call and recalculating y and log_detJ
self(x)
return self._cached_logDetJ
@copy_docs_from(ConditionedPlanar)
class Planar(ConditionedPlanar, TransformModule):
r"""
A 'planar' bijective transform with equation,
:math:`\mathbf{y} = \mathbf{x} + \mathbf{u}\tanh(\mathbf{w}^T\mathbf{z}+b)`
where :math:`\mathbf{x}` are the inputs, :math:`\mathbf{y}` are the outputs,
and the learnable parameters are :math:`b\in\mathbb{R}`,
:math:`\mathbf{u}\in\mathbb{R}^D`, :math:`\mathbf{w}\in\mathbb{R}^D` for
input dimension :math:`D`. For this to be an invertible transformation, the
condition :math:`\mathbf{w}^T\mathbf{u}>-1` is enforced.
Together with :class:`~pyro.distributions.TransformedDistribution` this provides
a way to create richer variational approximations.
Example usage:
>>> base_dist = dist.Normal(torch.zeros(10), torch.ones(10))
>>> transform = Planar(10)
>>> pyro.module("my_transform", transform) # doctest: +SKIP
>>> flow_dist = dist.TransformedDistribution(base_dist, [transform])
>>> flow_dist.sample() # doctest: +SKIP
The inverse of this transform does not possess an analytical solution and is
left unimplemented. However, the inverse is cached when the forward operation is
called during sampling, and so samples drawn using the planar transform can be
scored.
:param input_dim: the dimension of the input (and output) variable.
:type input_dim: int
References:
[1] Danilo Jimenez Rezende, Shakir Mohamed. Variational Inference with
Normalizing Flows. [arXiv:1505.05770]
"""
domain = constraints.real_vector
codomain = constraints.real_vector
bijective = True
def __init__(self, input_dim):
super().__init__(self._params)
self.bias = nn.Parameter(
torch.Tensor(
1,
)
)
self.u = nn.Parameter(
torch.Tensor(
input_dim,
)
)
self.w = nn.Parameter(
torch.Tensor(
input_dim,
)
)
self.input_dim = input_dim
self.reset_parameters()
def _params(self):
return self.bias, self.u, self.w
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.u.size(0))
self.w.data.uniform_(-stdv, stdv)
self.u.data.uniform_(-stdv, stdv)
self.bias.data.zero_()
@copy_docs_from(ConditionalTransformModule)
class ConditionalPlanar(ConditionalTransformModule):
r"""
A conditional 'planar' bijective transform using the equation,
:math:`\mathbf{y} = \mathbf{x} + \mathbf{u}\tanh(\mathbf{w}^T\mathbf{z}+b)`
where :math:`\mathbf{x}` are the inputs with dimension :math:`D`,
:math:`\mathbf{y}` are the outputs, and the pseudo-parameters
:math:`b\in\mathbb{R}`, :math:`\mathbf{u}\in\mathbb{R}^D`, and
:math:`\mathbf{w}\in\mathbb{R}^D` are the output of a function, e.g. a NN,
with input :math:`z\in\mathbb{R}^{M}` representing the context variable to
condition on. For this to be an invertible transformation, the condition
:math:`\mathbf{w}^T\mathbf{u}>-1` is enforced.
Together with :class:`~pyro.distributions.ConditionalTransformedDistribution`
this provides a way to create richer variational approximations.
Example usage:
>>> from pyro.nn.dense_nn import DenseNN
>>> input_dim = 10
>>> context_dim = 5
>>> batch_size = 3
>>> base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim))
>>> param_dims = [1, input_dim, input_dim]
>>> hypernet = DenseNN(context_dim, [50, 50], param_dims)
>>> transform = ConditionalPlanar(hypernet)
>>> z = torch.rand(batch_size, context_dim)
>>> flow_dist = dist.ConditionalTransformedDistribution(base_dist,
... [transform]).condition(z)
>>> flow_dist.sample(sample_shape=torch.Size([batch_size])) # doctest: +SKIP
The inverse of this transform does not possess an analytical solution and is
left unimplemented. However, the inverse is cached when the forward operation is
called during sampling, and so samples drawn using the planar transform can be
scored.
:param nn: a function inputting the context variable and outputting a triplet of
real-valued parameters of dimensions :math:`(1, D, D)`.
:type nn: callable
References:
[1] Variational Inference with Normalizing Flows [arXiv:1505.05770]
Danilo Jimenez Rezende, Shakir Mohamed
"""
domain = constraints.real_vector
codomain = constraints.real_vector
bijective = True
def __init__(self, nn):
super().__init__()
self.nn = nn
def _params(self, context):
return self.nn(context)
def condition(self, context):
params = partial(self._params, context)
return ConditionedPlanar(params)
def planar(input_dim):
"""
A helper function to create a :class:`~pyro.distributions.transforms.Planar`
object for consistency with other helpers.
:param input_dim: Dimension of input variable
:type input_dim: int
"""
return Planar(input_dim)
def conditional_planar(input_dim, context_dim, hidden_dims=None):
"""
A helper function to create a
:class:`~pyro.distributions.transforms.ConditionalPlanar` object that takes care
of constructing a dense network with the correct input/output dimensions.
:param input_dim: Dimension of input variable
:type input_dim: int
:param context_dim: Dimension of context variable
:type context_dim: int
:param hidden_dims: The desired hidden dimensions of the dense network. Defaults
to using [input_dim * 10, input_dim * 10]
:type hidden_dims: list[int]
"""
if hidden_dims is None:
hidden_dims = [input_dim * 10, input_dim * 10]
nn = DenseNN(context_dim, hidden_dims, param_dims=[1, input_dim, input_dim])
return ConditionalPlanar(nn)
| apache-2.0 |
matthiasdiener/spack | var/spack/repos/builtin/packages/r-gdsfmt/package.py | 3 | 2449 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGdsfmt(RPackage):
"""This package provides a high-level R interface to CoreArray Genomic
Data Structure (GDS) data files, which are portable across platforms
with hierarchical structure to store multiple scalable array-oriented
data sets with metadata information. It is suited for large-scale
datasets, especially for data which are much larger than the available
random-access memory. The gdsfmt package offers the efficient
operations specifically designed for integers of less than 8 bits,
since a diploid genotype, like single-nucleotide polymorphism (SNP),
usually occupies fewer bits than a byte. Data compression and
decompression are available with relatively efficient random access.
    It is also possible to read a GDS file in parallel from multiple R
    processes, with support from the parallel package."""
homepage = "http://bioconductor.org/packages/gdsfmt/"
url = "https://git.bioconductor.org/packages/gdsfmt"
version('1.14.1', git='https://git.bioconductor.org/packages/gdsfmt', commit='15743647b7eea5b82d3284858b4591fb6e59959d')
depends_on('r@3.4.0:3.4.9', when='@1.14.1')
| lgpl-2.1 |
uber/pyro | tests/contrib/test_util.py | 1 | 3036 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
from collections import OrderedDict
import pytest
import torch
from pyro.contrib.util import (
get_indices,
lexpand,
rdiag,
rexpand,
rmv,
rtril,
rvv,
tensor_to_dict,
)
from tests.common import assert_equal
def test_get_indices_sizes():
sizes = OrderedDict([("a", 2), ("b", 2), ("c", 2)])
assert_equal(get_indices(["b"], sizes=sizes), torch.tensor([2, 3]))
assert_equal(get_indices(["b", "c"], sizes=sizes), torch.tensor([2, 3, 4, 5]))
tensors = OrderedDict(
[("a", torch.ones(2)), ("b", torch.ones(2)), ("c", torch.ones(2))]
)
assert_equal(get_indices(["b"], tensors=tensors), torch.tensor([2, 3]))
assert_equal(get_indices(["b", "c"], tensors=tensors), torch.tensor([2, 3, 4, 5]))
def test_tensor_to_dict():
sizes = OrderedDict([("a", 2), ("b", 2), ("c", 2)])
vector = torch.tensor([1.0, 2, 3, 4, 5, 6])
assert_equal(
tensor_to_dict(sizes, vector),
{
"a": torch.tensor([1.0, 2.0]),
"b": torch.tensor([3.0, 4.0]),
"c": torch.tensor([5.0, 6.0]),
},
)
assert_equal(
tensor_to_dict(sizes, vector, subset=["b"]), {"b": torch.tensor([3.0, 4.0])}
)
@pytest.mark.parametrize(
"A,b", [(torch.tensor([[1.0, 2.0], [2.0, -3.0]]), torch.tensor([-1.0, 2.0]))]
)
def test_rmv(A, b):
assert_equal(rmv(A, b), A.mv(b), prec=1e-8)
batched_A = lexpand(A, 5, 4)
batched_b = lexpand(b, 5, 4)
expected_Ab = lexpand(A.mv(b), 5, 4)
assert_equal(rmv(batched_A, batched_b), expected_Ab, prec=1e-8)
@pytest.mark.parametrize("a,b", [(torch.tensor([1.0, 2.0]), torch.tensor([-1.0, 2.0]))])
def test_rvv(a, b):
assert_equal(rvv(a, b), torch.dot(a, b), prec=1e-8)
batched_a = lexpand(a, 5, 4)
batched_b = lexpand(b, 5, 4)
expected_ab = lexpand(torch.dot(a, b), 5, 4)
assert_equal(rvv(batched_a, batched_b), expected_ab, prec=1e-8)
def test_lexpand():
A = torch.tensor([[1.0, 2.0], [-2.0, 0]])
assert_equal(lexpand(A), A, prec=1e-8)
assert_equal(lexpand(A, 4), A.expand(4, 2, 2), prec=1e-8)
assert_equal(lexpand(A, 4, 2), A.expand(4, 2, 2, 2), prec=1e-8)
def test_rexpand():
A = torch.tensor([[1.0, 2.0], [-2.0, 0]])
assert_equal(rexpand(A), A, prec=1e-8)
assert_equal(rexpand(A, 4), A.unsqueeze(-1).expand(2, 2, 4), prec=1e-8)
assert_equal(
rexpand(A, 4, 2), A.unsqueeze(-1).unsqueeze(-1).expand(2, 2, 4, 2), prec=1e-8
)
def test_rtril():
A = torch.tensor([[1.0, 2.0], [-2.0, 0]])
assert_equal(rtril(A), torch.tril(A), prec=1e-8)
expanded = lexpand(A, 5, 4)
expected = lexpand(torch.tril(A), 5, 4)
assert_equal(rtril(expanded), expected, prec=1e-8)
def test_rdiag():
v = torch.tensor([1.0, 2.0, -1.0])
assert_equal(rdiag(v), torch.diag(v), prec=1e-8)
expanded = lexpand(v, 5, 4)
    expected = lexpand(torch.diag(v), 5, 4)
    assert_equal(rdiag(expanded), expected, prec=1e-8)
| apache-2.0 |
matthew-tucker/mne-python | examples/inverse/plot_tf_lcmv.py | 14 | 5869 | """
=====================================
Time-frequency beamforming using LCMV
=====================================
Compute LCMV source power in a grid of time-frequency windows and display
results.
The original reference is:
Dalal et al. Five-dimensional neuroimaging: Localization of the time-frequency
dynamics of cortical activity. NeuroImage (2008) vol. 40 (4) pp. 1686-1700
"""
# Author: Roman Goj <roman.goj@gmail.com>
#
# License: BSD (3-clause)
import mne
from mne import compute_covariance
from mne.io import Raw
from mne.datasets import sample
from mne.event import make_fixed_length_events
from mne.beamformer import tf_lcmv
from mne.viz import plot_source_spectrogram
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
noise_fname = data_path + '/MEG/sample/ernoise_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
subjects_dir = data_path + '/subjects'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
###############################################################################
# Read raw data, preload to allow filtering
raw = Raw(raw_fname, preload=True)
raw.info['bads'] = ['MEG 2443'] # 1 bad MEG channel
# Pick a selection of magnetometer channels. A subset of all channels was used
# to speed up the example. For a solution based on all MEG channels use
# meg=True, selection=None and add grad=4000e-13 to the reject dictionary.
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
stim=False, exclude='bads',
selection=left_temporal_channels)
reject = dict(mag=4e-12)
# Setting time limits for reading epochs. Note that tmin and tmax are set so
# that time-frequency beamforming will be performed for a wider range of time
# points than will later be displayed on the final spectrogram. This ensures
# that all time bins displayed represent an average of an equal number of time
# windows.
tmin, tmax = -0.55, 0.75 # s
tmin_plot, tmax_plot = -0.3, 0.5 # s
# Read epochs. Note that preload is set to False to enable tf_lcmv to read the
# underlying raw object.
# Filtering is then performed on raw data in tf_lcmv and the epochs
# parameters passed here are used to create epochs from filtered data. However,
# reading epochs without preloading means that bad epoch rejection is delayed
# until later. To perform bad epoch rejection based on the reject parameter
# passed here, run epochs.drop_bad_epochs(). This is done automatically in
# tf_lcmv to reject bad epochs based on unfiltered data.
event_id = 1
events = mne.read_events(event_fname)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=None, preload=False,
reject=reject)
# Read empty room noise, preload to allow filtering
raw_noise = Raw(noise_fname, preload=True)
raw_noise.info['bads'] = ['MEG 2443'] # 1 bad MEG channel
# Create artificial events for empty room noise data
events_noise = make_fixed_length_events(raw_noise, event_id, duration=1.)
# Create an epochs object using preload=True to reject bad epochs based on
# unfiltered data
epochs_noise = mne.Epochs(raw_noise, events_noise, event_id, tmin, tmax,
proj=True, picks=picks, baseline=None,
preload=True, reject=reject)
# Make sure the number of noise epochs is the same as data epochs
epochs_noise = epochs_noise[:len(epochs.events)]
# Read forward operator
forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
# Read label
label = mne.read_label(fname_label)
###############################################################################
# Time-frequency beamforming based on LCMV
# Setting frequency bins as in Dalal et al. 2008 (high gamma was subdivided)
freq_bins = [(4, 12), (12, 30), (30, 55), (65, 299)] # Hz
win_lengths = [0.3, 0.2, 0.15, 0.1] # s
# Setting the time step
tstep = 0.05
# Setting the whitened data covariance regularization parameter
data_reg = 0.001
# Subtract evoked response prior to computation?
subtract_evoked = False
# Calculating covariance from empty room noise. To use baseline data as noise
# substitute raw for raw_noise, epochs.events for epochs_noise.events, tmin for
# desired baseline length, and 0 for tmax_plot.
# Note, if using baseline data, the averaged evoked response in the baseline
# period should be flat.
noise_covs = []
for (l_freq, h_freq) in freq_bins:
raw_band = raw_noise.copy()
raw_band.filter(l_freq, h_freq, picks=epochs.picks, method='iir', n_jobs=1)
epochs_band = mne.Epochs(raw_band, epochs_noise.events, event_id,
tmin=tmin_plot, tmax=tmax_plot, baseline=None,
picks=epochs.picks, proj=True)
noise_cov = compute_covariance(epochs_band, method='shrunk')
noise_covs.append(noise_cov)
del raw_band # to save memory
# Computing LCMV solutions for time-frequency windows in a label in source
# space for faster computation, use label=None for full solution
stcs = tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths,
freq_bins=freq_bins, subtract_evoked=subtract_evoked,
reg=data_reg, label=label)
# Plotting source spectrogram for source with maximum activity.
# Note that tmin and tmax are set to display a time range that is smaller than
# the one for which beamforming estimates were calculated. This ensures that
# all time bins shown are a result of smoothing across an identical number of
# time windows.
plot_source_spectrogram(stcs, freq_bins, tmin=tmin_plot, tmax=tmax_plot,
source_index=None, colorbar=True)
| bsd-3-clause |
uber/pyro | pyro/infer/mcmc/nuts.py | 1 | 21893 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
from collections import namedtuple
import pyro
import pyro.distributions as dist
from pyro.distributions.util import scalar_like
from pyro.infer.autoguide import init_to_uniform
from pyro.infer.mcmc.hmc import HMC
from pyro.ops.integrator import potential_grad, velocity_verlet
from pyro.util import optional, torch_isnan
def _logaddexp(x, y):
minval, maxval = (x, y) if x < y else (y, x)
return (minval - maxval).exp().log1p() + maxval
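# Explanatory note (added): anchoring at the larger of the two values keeps
# the result finite for very negative energies, e.g.
# _logaddexp(torch.tensor(-1000.), torch.tensor(-1001.)) stays finite, while
# a naive log(exp(-1000) + exp(-1001)) would underflow to -inf.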
# sum_accept_probs and num_proposals are used to calculate
# the statistic accept_prob for Dual Averaging scheme;
# z_left_grads and z_right_grads are kept to avoid recalculating
# grads at left and right leaves;
# r_sum is used to check turning condition;
# z_proposal_pe and z_proposal_grads are used to cache the
# potential energy and potential energy gradient values for
# the proposal trace.
# weight is the number of valid points in case we use slice sampling
# and is the log sum of (unnormalized) probabilities of valid points
# when we use multinomial sampling
_TreeInfo = namedtuple(
"TreeInfo",
[
"z_left",
"r_left",
"r_left_unscaled",
"z_left_grads",
"z_right",
"r_right",
"r_right_unscaled",
"z_right_grads",
"z_proposal",
"z_proposal_pe",
"z_proposal_grads",
"r_sum",
"weight",
"turning",
"diverging",
"sum_accept_probs",
"num_proposals",
],
)
class NUTS(HMC):
"""
No-U-Turn Sampler kernel, which provides an efficient and convenient way
to run Hamiltonian Monte Carlo. The number of steps taken by the
integrator is dynamically adjusted on each call to ``sample`` to ensure
an optimal length for the Hamiltonian trajectory [1]. As such, the samples
generated will typically have lower autocorrelation than those generated
by the :class:`~pyro.infer.mcmc.HMC` kernel. Optionally, the NUTS kernel
also provides the ability to adapt step size during the warmup phase.
Refer to the `baseball example <https://github.com/pyro-ppl/pyro/blob/dev/examples/baseball.py>`_
to see how to do Bayesian inference in Pyro using NUTS.
**References**
[1] `The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo`,
Matthew D. Hoffman, and Andrew Gelman.
[2] `A Conceptual Introduction to Hamiltonian Monte Carlo`,
Michael Betancourt
[3] `Slice Sampling`,
Radford M. Neal
:param model: Python callable containing Pyro primitives.
:param potential_fn: Python callable calculating potential energy with input
is a dict of real support parameters.
:param float step_size: Determines the size of a single step taken by the
verlet integrator while computing the trajectory using Hamiltonian
dynamics. If not specified, it will be set to 1.
:param bool adapt_step_size: A flag to decide if we want to adapt step_size
during warm-up phase using Dual Averaging scheme.
:param bool adapt_mass_matrix: A flag to decide if we want to adapt mass
matrix during warm-up phase using Welford scheme.
:param bool full_mass: A flag to decide if mass matrix is dense or diagonal.
:param bool use_multinomial_sampling: A flag to decide if we want to sample
candidates along its trajectory using "multinomial sampling" or using
"slice sampling". Slice sampling is used in the original NUTS paper [1],
while multinomial sampling is suggested in [2]. By default, this flag is
set to True. If it is set to `False`, NUTS uses slice sampling.
:param dict transforms: Optional dictionary that specifies a transform
for a sample site with constrained support to unconstrained space. The
transform should be invertible, and implement `log_abs_det_jacobian`.
If not specified and the model has sites with constrained support,
automatic transformations will be applied, as specified in
:mod:`torch.distributions.constraint_registry`.
:param int max_plate_nesting: Optional bound on max number of nested
:func:`pyro.plate` contexts. This is required if model contains
discrete sample sites that can be enumerated over in parallel.
:param bool jit_compile: Optional parameter denoting whether to use
the PyTorch JIT to trace the log density computation, and use this
optimized executable trace in the integrator.
:param dict jit_options: A dictionary contains optional arguments for
:func:`torch.jit.trace` function.
:param bool ignore_jit_warnings: Flag to ignore warnings from the JIT
tracer when ``jit_compile=True``. Default is False.
:param float target_accept_prob: Target acceptance probability of step size
adaptation scheme. Increasing this value will lead to a smaller step size,
        so the sampling will be slower but more robust. Defaults to 0.8.
:param int max_tree_depth: Max depth of the binary tree created during the doubling
        scheme of NUTS sampler. Defaults to 10.
:param callable init_strategy: A per-site initialization function.
See :ref:`autoguide-initialization` section for available functions.
Example:
>>> true_coefs = torch.tensor([1., 2., 3.])
>>> data = torch.randn(2000, 3)
>>> dim = 3
>>> labels = dist.Bernoulli(logits=(true_coefs * data).sum(-1)).sample()
>>>
>>> def model(data):
... coefs_mean = torch.zeros(dim)
... coefs = pyro.sample('beta', dist.Normal(coefs_mean, torch.ones(3)))
... y = pyro.sample('y', dist.Bernoulli(logits=(coefs * data).sum(-1)), obs=labels)
... return y
>>>
>>> nuts_kernel = NUTS(model, adapt_step_size=True)
>>> mcmc = MCMC(nuts_kernel, num_samples=500, warmup_steps=300)
>>> mcmc.run(data)
>>> mcmc.get_samples()['beta'].mean(0) # doctest: +SKIP
tensor([ 0.9221, 1.9464, 2.9228])
"""
def __init__(
self,
model=None,
potential_fn=None,
step_size=1,
adapt_step_size=True,
adapt_mass_matrix=True,
full_mass=False,
use_multinomial_sampling=True,
transforms=None,
max_plate_nesting=None,
jit_compile=False,
jit_options=None,
ignore_jit_warnings=False,
target_accept_prob=0.8,
max_tree_depth=10,
init_strategy=init_to_uniform,
):
super().__init__(
model,
potential_fn,
step_size,
adapt_step_size=adapt_step_size,
adapt_mass_matrix=adapt_mass_matrix,
full_mass=full_mass,
transforms=transforms,
max_plate_nesting=max_plate_nesting,
jit_compile=jit_compile,
jit_options=jit_options,
ignore_jit_warnings=ignore_jit_warnings,
target_accept_prob=target_accept_prob,
init_strategy=init_strategy,
)
self.use_multinomial_sampling = use_multinomial_sampling
self._max_tree_depth = max_tree_depth
# There are three conditions to stop doubling process:
# + Tree is becoming too big.
# + The trajectory is making a U-turn.
# + The probability of the states becoming negligible: p(z, r) << u,
# here u is the "slice" variable introduced at the `self.sample(...)` method.
# Denote E_p = -log p(z, r), E_u = -log u, the third condition is equivalent to
# sliced_energy := E_p - E_u > some constant =: max_sliced_energy.
        # This also suggests the notion "diverging" in the implementation:
# when the energy E_p diverges from E_u too much, we stop doubling.
# Here, as suggested in [1], we set dE_max = 1000.
self._max_sliced_energy = 1000
def _is_turning(self, r_left_unscaled, r_right_unscaled, r_sum):
# We follow the strategy in Section A.4.2 of [2] for this implementation.
left_angle = 0.0
right_angle = 0.0
for site_names, value in r_sum.items():
rho = (
value - (r_left_unscaled[site_names] + r_right_unscaled[site_names]) / 2
)
left_angle += r_left_unscaled[site_names].dot(rho)
right_angle += r_right_unscaled[site_names].dot(rho)
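        # rho approximates the integrated momentum along the trajectory (minus
        # half of each endpoint's contribution); a non-positive projection of
        # either endpoint's momentum onto rho means the trajectory has begun
        # to double back on itself, so doubling should stop.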
return (left_angle <= 0) or (right_angle <= 0)
def _build_basetree(self, z, r, z_grads, log_slice, direction, energy_current):
step_size = self.step_size if direction == 1 else -self.step_size
z_new, r_new, z_grads, potential_energy = velocity_verlet(
z,
r,
self.potential_fn,
self.mass_matrix_adapter.kinetic_grad,
step_size,
z_grads=z_grads,
)
r_new_unscaled = self.mass_matrix_adapter.unscale(r_new)
energy_new = potential_energy + self._kinetic_energy(r_new_unscaled)
# handle the NaN case
energy_new = (
scalar_like(energy_new, float("inf"))
if torch_isnan(energy_new)
else energy_new
)
sliced_energy = energy_new + log_slice
diverging = sliced_energy > self._max_sliced_energy
delta_energy = energy_new - energy_current
accept_prob = (-delta_energy).exp().clamp(max=1.0)
if self.use_multinomial_sampling:
tree_weight = -sliced_energy
else:
# As a part of the slice sampling process (see below), along the trajectory
# we eliminate states which p(z, r) < u, or dE > 0.
# Due to this elimination (and stop doubling conditions),
# the weight of binary tree might not equal to 2^tree_depth.
tree_weight = scalar_like(sliced_energy, 1.0 if sliced_energy <= 0 else 0.0)
r_sum = r_new_unscaled
return _TreeInfo(
z_new,
r_new,
r_new_unscaled,
z_grads,
z_new,
r_new,
r_new_unscaled,
z_grads,
z_new,
potential_energy,
z_grads,
r_sum,
tree_weight,
False,
diverging,
accept_prob,
1,
)
def _build_tree(
self, z, r, z_grads, log_slice, direction, tree_depth, energy_current
):
if tree_depth == 0:
return self._build_basetree(
z, r, z_grads, log_slice, direction, energy_current
)
# build the first half of tree
half_tree = self._build_tree(
z, r, z_grads, log_slice, direction, tree_depth - 1, energy_current
)
z_proposal = half_tree.z_proposal
z_proposal_pe = half_tree.z_proposal_pe
z_proposal_grads = half_tree.z_proposal_grads
# Check conditions to stop doubling. If we meet that condition,
# there is no need to build the other tree.
if half_tree.turning or half_tree.diverging:
return half_tree
# Else, build remaining half of tree.
# If we are going to the right, start from the right leaf of the first half.
if direction == 1:
z = half_tree.z_right
r = half_tree.r_right
z_grads = half_tree.z_right_grads
else: # otherwise, start from the left leaf of the first half
z = half_tree.z_left
r = half_tree.r_left
z_grads = half_tree.z_left_grads
other_half_tree = self._build_tree(
z, r, z_grads, log_slice, direction, tree_depth - 1, energy_current
)
if self.use_multinomial_sampling:
tree_weight = _logaddexp(half_tree.weight, other_half_tree.weight)
else:
tree_weight = half_tree.weight + other_half_tree.weight
sum_accept_probs = half_tree.sum_accept_probs + other_half_tree.sum_accept_probs
num_proposals = half_tree.num_proposals + other_half_tree.num_proposals
r_sum = {
site_names: half_tree.r_sum[site_names] + other_half_tree.r_sum[site_names]
for site_names in self.inverse_mass_matrix
}
# The probability of that proposal belongs to which half of tree
# is computed based on the weights of each half.
if self.use_multinomial_sampling:
other_half_tree_prob = (other_half_tree.weight - tree_weight).exp()
else:
# For the special case that the weights of each half are both 0,
# we choose the proposal from the first half
# (any is fine, because the probability of picking it at the end is 0!).
other_half_tree_prob = (
other_half_tree.weight / tree_weight
if tree_weight > 0
else scalar_like(tree_weight, 0.0)
)
is_other_half_tree = pyro.sample(
"is_other_half_tree", dist.Bernoulli(probs=other_half_tree_prob)
)
if is_other_half_tree == 1:
z_proposal = other_half_tree.z_proposal
z_proposal_pe = other_half_tree.z_proposal_pe
z_proposal_grads = other_half_tree.z_proposal_grads
# leaves of the full tree are determined by the direction
if direction == 1:
z_left = half_tree.z_left
r_left = half_tree.r_left
r_left_unscaled = half_tree.r_left_unscaled
z_left_grads = half_tree.z_left_grads
z_right = other_half_tree.z_right
r_right = other_half_tree.r_right
r_right_unscaled = other_half_tree.r_right_unscaled
z_right_grads = other_half_tree.z_right_grads
else:
z_left = other_half_tree.z_left
r_left = other_half_tree.r_left
r_left_unscaled = other_half_tree.r_left_unscaled
z_left_grads = other_half_tree.z_left_grads
z_right = half_tree.z_right
r_right = half_tree.r_right
r_right_unscaled = half_tree.r_right_unscaled
z_right_grads = half_tree.z_right_grads
# We already check if first half tree is turning. Now, we check
# if the other half tree or full tree are turning.
turning = other_half_tree.turning or self._is_turning(
r_left_unscaled, r_right_unscaled, r_sum
)
# The divergence is checked by the second half tree (the first half is already checked).
diverging = other_half_tree.diverging
return _TreeInfo(
z_left,
r_left,
r_left_unscaled,
z_left_grads,
z_right,
r_right,
r_right_unscaled,
z_right_grads,
z_proposal,
z_proposal_pe,
z_proposal_grads,
r_sum,
tree_weight,
turning,
diverging,
sum_accept_probs,
num_proposals,
)
def sample(self, params):
z, potential_energy, z_grads = self._fetch_from_cache()
# recompute PE when cache is cleared
if z is None:
z = params
z_grads, potential_energy = potential_grad(self.potential_fn, z)
self._cache(z, potential_energy, z_grads)
# return early if no sample sites
elif len(z) == 0:
self._t += 1
self._mean_accept_prob = 1.0
if self._t > self._warmup_steps:
self._accept_cnt += 1
return z
r, r_unscaled = self._sample_r(name="r_t={}".format(self._t))
energy_current = self._kinetic_energy(r_unscaled) + potential_energy
# Ideally, following a symplectic integrator trajectory, the energy is constant.
# In that case, we can sample the proposal uniformly, and there is no need to use "slice".
# However, it is not the case for real situation: there are errors during the computation.
# To deal with that problem, as in [1], we introduce an auxiliary "slice" variable (denoted
# by u).
# The sampling process goes as follows:
# first sampling u from initial state (z_0, r_0) according to
# u ~ Uniform(0, p(z_0, r_0)),
# then sampling state (z, r) from the integrator trajectory according to
# (z, r) ~ Uniform({(z', r') in trajectory | p(z', r') >= u}).
#
# For more information about slice sampling method, see [3].
# For another version of NUTS which uses multinomial sampling instead of slice sampling,
# see [2].
if self.use_multinomial_sampling:
log_slice = -energy_current
else:
# Rather than sampling the slice variable from `Uniform(0, exp(-energy))`, we can
# sample log_slice directly using `energy`, so as to avoid potential underflow or
# overflow issues ([2]).
slice_exp_term = pyro.sample(
"slicevar_exp_t={}".format(self._t),
dist.Exponential(scalar_like(energy_current, 1.0)),
)
log_slice = -energy_current - slice_exp_term
z_left = z_right = z
r_left = r_right = r
r_left_unscaled = r_right_unscaled = r_unscaled
z_left_grads = z_right_grads = z_grads
accepted = False
r_sum = r_unscaled
sum_accept_probs = 0.0
num_proposals = 0
tree_weight = scalar_like(
energy_current, 0.0 if self.use_multinomial_sampling else 1.0
)
# Temporarily disable distributions args checking as
# NaNs are expected during step size adaptation.
with optional(pyro.validation_enabled(False), self._t < self._warmup_steps):
# doubling process, stop when turning or diverging
tree_depth = 0
while tree_depth < self._max_tree_depth:
direction = pyro.sample(
"direction_t={}_treedepth={}".format(self._t, tree_depth),
dist.Bernoulli(probs=scalar_like(tree_weight, 0.5)),
)
direction = int(direction.item())
if (
direction == 1
): # go to the right, start from the right leaf of current tree
new_tree = self._build_tree(
z_right,
r_right,
z_right_grads,
log_slice,
direction,
tree_depth,
energy_current,
)
# update leaf for the next doubling process
z_right = new_tree.z_right
r_right = new_tree.r_right
r_right_unscaled = new_tree.r_right_unscaled
z_right_grads = new_tree.z_right_grads
                else:  # go to the left, start from the left leaf of current tree
new_tree = self._build_tree(
z_left,
r_left,
z_left_grads,
log_slice,
direction,
tree_depth,
energy_current,
)
z_left = new_tree.z_left
r_left = new_tree.r_left
r_left_unscaled = new_tree.r_left_unscaled
z_left_grads = new_tree.z_left_grads
sum_accept_probs = sum_accept_probs + new_tree.sum_accept_probs
num_proposals = num_proposals + new_tree.num_proposals
# stop doubling
if new_tree.diverging:
if self._t >= self._warmup_steps:
self._divergences.append(self._t - self._warmup_steps)
break
if new_tree.turning:
break
tree_depth += 1
if self.use_multinomial_sampling:
new_tree_prob = (new_tree.weight - tree_weight).exp()
else:
new_tree_prob = new_tree.weight / tree_weight
rand = pyro.sample(
"rand_t={}_treedepth={}".format(self._t, tree_depth),
dist.Uniform(
scalar_like(new_tree_prob, 0.0), scalar_like(new_tree_prob, 1.0)
),
)
if rand < new_tree_prob:
accepted = True
z = new_tree.z_proposal
z_grads = new_tree.z_proposal_grads
self._cache(z, new_tree.z_proposal_pe, z_grads)
r_sum = {
site_names: r_sum[site_names] + new_tree.r_sum[site_names]
for site_names in r_unscaled
}
if self._is_turning(
r_left_unscaled, r_right_unscaled, r_sum
): # stop doubling
break
else: # update tree_weight
if self.use_multinomial_sampling:
tree_weight = _logaddexp(tree_weight, new_tree.weight)
else:
tree_weight = tree_weight + new_tree.weight
accept_prob = sum_accept_probs / num_proposals
self._t += 1
if self._t > self._warmup_steps:
n = self._t - self._warmup_steps
if accepted:
self._accept_cnt += 1
else:
n = self._t
self._adapter.step(self._t, z, accept_prob, z_grads)
self._mean_accept_prob += (accept_prob.item() - self._mean_accept_prob) / n
return z.copy()
| apache-2.0 |
vitaly-krugl/nupic | examples/opf/experiments/multistep/base/description.py | 10 | 15963 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
      # at each step. 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': { 'field1': { 'fieldname': u'field1',
'n': 100,
'name': u'field1',
'type': 'SDRCategoryEncoder',
'w': 21},
'field2': { 'clipInput': True,
'fieldname': u'field2',
'maxval': 50,
'minval': 0,
'n': 500,
'name': u'field2',
'type': 'ScalarEncoder',
'w': 21}},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'spatialImp' : 'cpp',
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
# boostStrength controls the strength of boosting. It should be a
# a number greater or equal than 0.0. No boosting is applied if
# boostStrength=0.0. Boosting encourages efficient usage of columns.
'boostStrength': 0.0,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 16,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'py',
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.25,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
'predictionSteps': [1],
'predictedField': 'field1',
'dataSource': 'fillInBySubExperiment',
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
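# Worked example (explanatory note, assuming aggregationDivide returns the
# ratio of the two periods): with 15-minute aggregation and a predictAheadTime
# of 1 hour, predictionSteps resolves to round(60 / 15) = 4.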
# Fill in classifier steps
config['modelParams']['clParams']['steps'] = '%s' % \
(','.join([str(x) for x in config['predictionSteps']]))
# If the predicted field is field1 (a category), use avg_err as the metric;
# otherwise (field2, a scalar), use aae.
if config['predictedField'] == 'field1':
metricName = 'avg_err'
loggedMetrics = ['.*avg_err.*']
else:
metricName = 'aae'
loggedMetrics = ['.*aae.*']
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'multistep',
'streams': [ {
'columns': ['*'],
'info': 'multi-step',
'source': config['dataSource'],
}],
'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{'predictedField': config['predictedField'],
'predictionSteps': config['predictionSteps']},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=config['predictedField'], metric=metricName,
inferenceElement='prediction', params={'window': 200}),
MetricSpec(field=config['predictedField'], metric='trivial',
inferenceElement='prediction', params={'errorMetric': metricName,
'window': 200}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': loggedMetrics,
}
# Add multi-step prediction metrics
for steps in config['predictionSteps']:
control['metrics'].append(
MetricSpec(field=config['predictedField'], metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': metricName, 'window': 200,
'steps': steps}))
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| agpl-3.0 |
rahuldhote/scikit-learn | sklearn/datasets/species_distributions.py | 197 | 7923 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
   Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header[b'NODATA_value']
    if nodata != -9999:
        # Remap the file's NODATA marker to the canonical -9999 sentinel
        # (masking cells equal to nodata, rather than indexing by its value).
        M[M == nodata] = -9999
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
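# Typical downstream use (illustrative, as in the plotting example referenced
# in the module docstring): combine the two axes into a dense lat/lon mesh,
# e.g. X, Y = np.meshgrid(xgrid, ygrid[::-1]).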
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
      also known as the Forest Small Rice Rat, a rodent that lives in
      Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
if not exists(join(data_home, DATA_ARCHIVE_NAME)):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
else:
bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
return bunch
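# Example (a sketch, not part of the original module): selecting the training
# observations for one species; the species label follows the convention used
# by the plotting example elsewhere in this collection.
#
#   data = fetch_species_distributions()
#   mask = data.train['species'] == b'bradypus_variegatus_0'
#   lons, lats = data.train['dd long'][mask], data.train['dd lat'][mask]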
| bsd-3-clause |
vitaly-krugl/nupic | src/nupic/data/aggregator.py | 10 | 28894 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from collections import defaultdict
import datetime
import os
from pkg_resources import resource_filename
import time
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.field_meta import FieldMetaSpecial
from nupic.data.file_record_stream import FileRecordStream
"""The aggregator aggregates PF datasets
It supports aggregation of multiple records based on time.
Common use cases:
- Aggregate records by month
- Aggregate records every 3 months starting April 15th
- Aggregate records in 2.5 seconds intervals
Assumption: aggregated slices fit in memory. All the records that are aggregated
per period are stored in memory until the next slice starts and are only
aggregated then. If this assumption is too strong the script will need to write
slices to a temp storage or use incremental aggregation techniques.
"""
def initFilter(input, filterInfo = None):
""" Initializes internal filter variables for further processing.
  Returns a tuple (function to call, parameters for the filter call)
The filterInfo is a dict. Here is an example structure:
{fieldName: {'min': x,
'max': y,
'type': 'category', # or 'number'
'acceptValues': ['foo', 'bar'],
}
}
This returns the following:
(filterFunc, ((fieldIdx, fieldFilterFunc, filterDict),
...)
Where fieldIdx is the index of the field within each record
fieldFilterFunc returns True if the value is "OK" (within min, max or
part of acceptValues)
  filterDict is a dict containing 'type', 'min', 'max', 'acceptValues'
"""
if filterInfo is None:
return None
# Build an array of index/func to call on record[index]
filterList = []
for i, fieldName in enumerate(input.getFieldNames()):
fieldFilter = filterInfo.get(fieldName, None)
    if fieldFilter is None:
continue
var = dict()
var['acceptValues'] = None
min = fieldFilter.get('min', None)
max = fieldFilter.get('max', None)
var['min'] = min
var['max'] = max
if fieldFilter['type'] == 'category':
var['acceptValues'] = fieldFilter['acceptValues']
fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
x['value'] in x['acceptValues'])
elif fieldFilter['type'] == 'number':
      if min is not None and max is not None:
fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
x['value'] >= x['min'] and x['value'] <= x['max'])
      elif min is not None:
fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
x['value'] >= x['min'])
else:
fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
x['value'] <= x['max'])
filterList.append((i, fp, var))
return (_filterRecord, filterList)
def _filterRecord(filterList, record):
""" Takes a record and returns true if record meets filter criteria,
false otherwise
"""
for (fieldIdx, fp, params) in filterList:
x = dict()
x['value'] = record[fieldIdx]
x['acceptValues'] = params['acceptValues']
x['min'] = params['min']
x['max'] = params['max']
if not fp(x):
return False
# None of the field filters triggered, accept the record as a good one
return True
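# Example (a sketch; 'inputObj', 'record', and the 'consumption' field name
# are assumed to come from a FileRecordStream-style reader): build and apply
# a numeric range filter over records read from an input stream.
#
#   filterFunc, filterList = initFilter(
#       inputObj, {'consumption': {'type': 'number', 'min': 0, 'max': 100}})
#   isAcceptable = filterFunc(filterList, record)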
def _aggr_first(inList):
""" Returns first non-None element in the list, or None if all are None
"""
for elem in inList:
if elem != SENTINEL_VALUE_FOR_MISSING_DATA:
return elem
return None
def _aggr_last(inList):
""" Returns last non-None element in the list, or None if all are None
"""
for elem in reversed(inList):
if elem != SENTINEL_VALUE_FOR_MISSING_DATA:
return elem
return None
def _aggr_sum(inList):
""" Returns sum of the elements in the list. Missing items are replaced with
the mean value
"""
aggrMean = _aggr_mean(inList)
  if aggrMean is None:
return None
aggrSum = 0
for elem in inList:
if elem != SENTINEL_VALUE_FOR_MISSING_DATA:
aggrSum += elem
else:
aggrSum += aggrMean
return aggrSum
def _aggr_mean(inList):
""" Returns mean of non-None elements of the list
"""
aggrSum = 0
nonNone = 0
for elem in inList:
if elem != SENTINEL_VALUE_FOR_MISSING_DATA:
aggrSum += elem
nonNone += 1
if nonNone != 0:
return aggrSum / nonNone
else:
return None
def _aggr_mode(inList):
""" Returns most common value seen in the non-None elements of the list
"""
valueCounts = dict()
nonNone = 0
for elem in inList:
if elem == SENTINEL_VALUE_FOR_MISSING_DATA:
continue
nonNone += 1
if elem in valueCounts:
valueCounts[elem] += 1
else:
valueCounts[elem] = 1
# Get the most common one
if nonNone == 0:
return None
# Sort by counts
sortedCounts = valueCounts.items()
sortedCounts.sort(cmp=lambda x,y: x[1] - y[1], reverse=True)
return sortedCounts[0][0]
def _aggr_weighted_mean(inList, params):
""" Weighted mean uses params (must be the same size as inList) and
  computes the weighted mean of inList"""
assert(len(inList) == len(params))
# If all weights are 0, then the value is not defined, return None (missing)
weightsSum = sum(params)
if weightsSum == 0:
return None
weightedMean = 0
for i, elem in enumerate(inList):
weightedMean += elem * params[i]
return weightedMean / weightsSum
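# Worked example (illustrative): values [2, 4] with weights [1, 3] yield
# (2*1 + 4*3) / (1 + 3) = 3.5. Note that, unlike the aggregators above,
# this function does not skip the missing-data sentinel.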
class Aggregator(object):
"""
This class provides context and methods for aggregating records. The caller
should construct an instance of Aggregator and then call the next() method
repeatedly to get each aggregated record.
This is an example aggregationInfo dict:
{
'hours': 1,
'minutes': 15,
'fields': [
('timestamp', 'first'),
('gym', 'first'),
('consumption', 'sum')
],
}
"""
def __init__(self, aggregationInfo, inputFields, timeFieldName=None,
sequenceIdFieldName=None, resetFieldName=None, filterInfo=None):
""" Construct an aggregator instance
Params:
- aggregationInfo: a dictionary that contains the following entries
- fields: a list of pairs. Each pair is a field name and an
aggregation function (e.g. sum). The function will be used to aggregate
multiple values during the aggregation period.
- aggregation period: 0 or more of unit=value fields; allowed units are:
[years months] | [weeks days hours minutes seconds milliseconds
microseconds]
NOTE: years and months are mutually-exclusive with the other units. See
getEndTime() and _aggregate() for more details.
Example1: years=1, months=6,
Example2: hours=1, minutes=30,
If none of the period fields are specified or if all that are specified
have values of 0, then aggregation will be suppressed, and the given
inputFile parameter value will be returned.
- inputFields: The fields from the data source. This is a sequence of
`nupic.data.fieldmeta.FieldMetaInfo` instances.
- timeFieldName: name of the field to use as the time field. If None,
then the time field will be queried from the reader.
    - sequenceIdFieldName: name of the field to use as the sequenceId. If None,
      then the sequence id field will be queried from the reader.
    - resetFieldName: name of the field to use as the reset field. If None,
      then the reset field will be queried from the reader.
- filterInfo: a structure with rules for filtering records out
If the input file contains a time field, sequence id field or reset field
that were not specified in aggregationInfo fields, those fields will be
added automatically with the following rules:
1. The order will be R, S, T, rest of the fields
2. The aggregation function for these will be to pick the first:
lambda x: x[0]
"""
# -----------------------------------------------------------------------
# Save member variables.
# The same aggregationInfo dict may be used by the caller for generating
# more datasets (with slight changes), so it is safer to copy it here and
# all changes made here will not affect the input aggregationInfo
self._filterInfo = filterInfo
self._nullAggregation = False
self._inputFields = inputFields
# See if this is a null aggregation
self._nullAggregation = False
if aggregationInfo is None:
self._nullAggregation = True
else:
aggDef = defaultdict(lambda: 0, aggregationInfo)
if (aggDef['years'] == aggDef['months'] == aggDef['weeks'] ==
aggDef['days'] == aggDef['hours'] == aggDef['minutes'] ==
aggDef['seconds'] == aggDef['milliseconds'] ==
aggDef['microseconds'] == 0):
self._nullAggregation = True
# Prepare the field filtering info. The filter allows us to ignore records
# based on specified min or max values for each field.
self._filter = initFilter(self._inputFields, self._filterInfo)
# ----------------------------------------------------------------------
# Fill in defaults
self._fields = None
self._resetFieldIdx = None
self._timeFieldIdx = None
self._sequenceIdFieldIdx = None
self._aggTimeDelta = datetime.timedelta()
self._aggYears = 0
self._aggMonths = 0
# Init state variables used within next()
self._aggrInputBookmark = None
self._startTime = None
self._endTime = None
self._sequenceId = None
self._firstSequenceStartTime = None
self._inIdx = -1
self._slice = defaultdict(list)
# ========================================================================
# Get aggregation params
# self._fields will be a list of tuples: (fieldIdx, funcPtr, funcParam)
if not self._nullAggregation:
# ---------------------------------------------------------------------
# Verify that all aggregation field names exist in the input
fieldNames = [f[0] for f in aggregationInfo['fields']]
readerFieldNames = [f[0] for f in self._inputFields]
for name in fieldNames:
if not name in readerFieldNames:
raise Exception('No such input field: %s' % (name))
# ---------------------------------------------------------------------
# Get the indices of the special fields, if given to our constructor
if timeFieldName is not None:
self._timeFieldIdx = readerFieldNames.index(timeFieldName)
if resetFieldName is not None:
self._resetFieldIdx = readerFieldNames.index(resetFieldName)
if sequenceIdFieldName is not None:
self._sequenceIdFieldIdx = readerFieldNames.index(sequenceIdFieldName)
# ---------------------------------------------------------------------
# Re-order the fields to match the order in the reader and add in any
# fields from the reader that were not already in the aggregationInfo
# fields list.
self._fields = []
fieldIdx = -1
for (name, type, special) in self._inputFields:
fieldIdx += 1
# See if it exists in the aggregationInfo
found = False
for field in aggregationInfo['fields']:
if field[0] == name:
aggFunctionName = field[1]
found = True
break
if not found:
aggFunctionName = 'first'
# Convert to a function pointer and optional params
(funcPtr, params) = self._getFuncPtrAndParams(aggFunctionName)
# Add it
self._fields.append((fieldIdx, funcPtr, params))
# Is it a special field that we are still looking for?
if special == FieldMetaSpecial.reset and self._resetFieldIdx is None:
self._resetFieldIdx = fieldIdx
if special == FieldMetaSpecial.timestamp and self._timeFieldIdx is None:
self._timeFieldIdx = fieldIdx
if (special == FieldMetaSpecial.sequence and
self._sequenceIdFieldIdx is None):
self._sequenceIdFieldIdx = fieldIdx
assert self._timeFieldIdx is not None, "No time field was found"
# Create an instance of _AggregationPeriod with the aggregation period
self._aggTimeDelta = datetime.timedelta(days=aggDef['days'],
hours=aggDef['hours'],
minutes=aggDef['minutes'],
seconds=aggDef['seconds'],
milliseconds=aggDef['milliseconds'],
microseconds=aggDef['microseconds'],
weeks=aggDef['weeks'])
self._aggYears = aggDef['years']
self._aggMonths = aggDef['months']
if self._aggTimeDelta:
assert self._aggYears == 0
assert self._aggMonths == 0
def _getEndTime(self, t):
"""Add the aggregation period to the input time t and return a datetime object
    Years and months are handled as a special case due to leap years
    and months having different numbers of days. They can't be converted
    to a strict timedelta because, for example, a 3-month period has a
    different actual duration depending on its start date. The solution is
    to add the years and months fields directly to the current time.
Other periods are converted to timedelta and just added to current time.
"""
assert isinstance(t, datetime.datetime)
if self._aggTimeDelta:
return t + self._aggTimeDelta
else:
year = t.year + self._aggYears + (t.month - 1 + self._aggMonths) / 12
month = (t.month - 1 + self._aggMonths) % 12 + 1
return t.replace(year=year, month=month)
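  # Worked example (illustrative): with years=1, months=6 and
  # t = datetime(2013, 10, 15), we get year = 2013 + 1 + (9 + 6) / 12 = 2015
  # and month = (9 + 6) % 12 + 1 = 4, i.e. 2015-04-15.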
def _getFuncPtrAndParams(self, funcName):
""" Given the name of an aggregation function, returns the function pointer
and param.
Parameters:
------------------------------------------------------------------------
funcName: a string (name of function) or funcPtr
retval: (funcPtr, param)
"""
params = None
if isinstance(funcName, basestring):
if funcName == 'sum':
fp = _aggr_sum
elif funcName == 'first':
fp = _aggr_first
elif funcName == 'last':
fp = _aggr_last
elif funcName == 'mean':
fp = _aggr_mean
elif funcName == 'max':
fp = max
elif funcName == 'min':
fp = min
elif funcName == 'mode':
fp = _aggr_mode
elif funcName.startswith('wmean:'):
fp = _aggr_weighted_mean
paramsName = funcName[6:]
params = [f[0] for f in self._inputFields].index(paramsName)
else:
fp = funcName
return (fp, params)
def _createAggregateRecord(self):
""" Generate the aggregated output record
Parameters:
------------------------------------------------------------------------
retval: outputRecord
"""
record = []
for i, (fieldIdx, aggFP, paramIdx) in enumerate(self._fields):
if aggFP is None: # this field is not supposed to be aggregated.
continue
values = self._slice[i]
if paramIdx is not None:
record.append(aggFP(values, self._slice[paramIdx]))
else:
record.append(aggFP(values))
return record
def isNullAggregation(self):
""" Return True if no aggregation will be performed, either because the
aggregationInfo was None or all aggregation params within it were 0.
"""
return self._nullAggregation
def next(self, record, curInputBookmark):
""" Return the next aggregated record, if any
Parameters:
------------------------------------------------------------------------
record: The input record (values only) from the input source, or
None if the input has reached EOF (this will cause this
method to force completion of and return any partially
aggregated time period)
curInputBookmark: The bookmark to the next input record
retval:
(outputRecord, inputBookmark)
outputRecord: the aggregated record
inputBookmark: a bookmark to the last position from the input that
contributed to this aggregated record.
If we don't have any aggregated records yet, returns (None, None)
The caller should generally do a loop like this:
while True:
inRecord = reader.getNextRecord()
bookmark = reader.getBookmark()
(aggRecord, aggBookmark) = aggregator.next(inRecord, bookmark)
# reached EOF?
if inRecord is None and aggRecord is None:
break
if aggRecord is not None:
         processRecord(aggRecord, aggBookmark)
This method makes use of the self._slice member variable to build up
the values we need to aggregate. This is a dict of lists. The keys are
the field indices and the elements of each list are the values for that
field. For example:
      self._slice = { 0: [42, 53], 1: [4.0, 5.1] }
"""
# This will hold the aggregated record we return
outRecord = None
# This will hold the bookmark of the last input used within the
# aggregated record we return.
retInputBookmark = None
if record is not None:
# Increment input count
self._inIdx += 1
#print self._inIdx, record
# Apply the filter, ignore the record if any field is unacceptable
      if self._filter is not None and not self._filter[0](self._filter[1], record):
return (None, None)
# If no aggregation info just return as-is
if self._nullAggregation:
return (record, curInputBookmark)
# ----------------------------------------------------------------------
# Do aggregation
#
# Remember the very first record time stamp - it will be used as
# the timestamp for all first records in all sequences to align
# times for the aggregation/join of sequences.
#
# For a set of aggregated records, it will use the beginning of the time
# window as a timestamp for the set
#
t = record[self._timeFieldIdx]
      if self._firstSequenceStartTime is None:
self._firstSequenceStartTime = t
# Create initial startTime and endTime if needed
if self._startTime is None:
self._startTime = t
if self._endTime is None:
self._endTime = self._getEndTime(t)
assert self._endTime > t
#print 'Processing line:', i, t, endTime
#from dbgp.client import brk; brk(port=9011)
# ----------------------------------------------------------------------
# Does this record have a reset signal or sequence Id associated with it?
# If so, see if we've reached a sequence boundary
if self._resetFieldIdx is not None:
resetSignal = record[self._resetFieldIdx]
else:
resetSignal = None
if self._sequenceIdFieldIdx is not None:
currSequenceId = record[self._sequenceIdFieldIdx]
else:
currSequenceId = None
newSequence = (resetSignal == 1 and self._inIdx > 0) \
or self._sequenceId != currSequenceId \
or self._inIdx == 0
if newSequence:
self._sequenceId = currSequenceId
# --------------------------------------------------------------------
# We end the aggregation chunk if we go past the end time
# -OR- we get an out of order record (t < startTime)
sliceEnded = (t >= self._endTime or t < self._startTime)
# -------------------------------------------------------------------
# Time to generate a new output record?
if (newSequence or sliceEnded) and len(self._slice) > 0:
# Create aggregated record
# print 'Creating aggregate record...'
# Make first record timestamp as the beginning of the time period,
# in case the first record wasn't falling on the beginning of the period
for j, f in enumerate(self._fields):
index = f[0]
if index == self._timeFieldIdx:
self._slice[j][0] = self._startTime
break
# Generate the aggregated record
outRecord = self._createAggregateRecord()
retInputBookmark = self._aggrInputBookmark
# Reset the slice
self._slice = defaultdict(list)
# --------------------------------------------------------------------
# Add current record to slice (Note keeping slices in memory). Each
# field in the slice is a list of field values from all the sliced
# records
for j, f in enumerate(self._fields):
index = f[0]
# append the parsed field value to the proper aggregated slice field.
self._slice[j].append(record[index])
self._aggrInputBookmark = curInputBookmark
# --------------------------------------------------------------------
# If we've encountered a new sequence, start aggregation over again
if newSequence:
# TODO: May use self._firstSequenceStartTime as a start for the new
# sequence (to align all sequences)
self._startTime = t
self._endTime = self._getEndTime(t)
# --------------------------------------------------------------------
# If a slice just ended, re-compute the start and end time for the
# next aggregated record
if sliceEnded:
# Did we receive an out of order record? If so, go back and iterate
# till we get to the next end time boundary.
if t < self._startTime:
self._endTime = self._firstSequenceStartTime
while t >= self._endTime:
self._startTime = self._endTime
self._endTime = self._getEndTime(self._endTime)
# If we have a record to return, do it now
if outRecord is not None:
return (outRecord, retInputBookmark)
# ---------------------------------------------------------------------
# Input reached EOF
# Aggregate one last time in the end if necessary
elif self._slice:
# Make first record timestamp as the beginning of the time period,
# in case the first record wasn't falling on the beginning of the period
for j, f in enumerate(self._fields):
index = f[0]
if index == self._timeFieldIdx:
self._slice[j][0] = self._startTime
break
outRecord = self._createAggregateRecord()
retInputBookmark = self._aggrInputBookmark
self._slice = defaultdict(list)
# Return aggregated record
return (outRecord, retInputBookmark)
def generateDataset(aggregationInfo, inputFilename, outputFilename=None):
"""Generate a dataset of aggregated values
Parameters:
----------------------------------------------------------------------------
aggregationInfo: a dictionary that contains the following entries
- fields: a list of pairs. Each pair is a field name and an
aggregation function (e.g. sum). The function will be used to aggregate
multiple values during the aggregation period.
aggregation period: 0 or more of unit=value fields; allowed units are:
[years months] |
[weeks days hours minutes seconds milliseconds microseconds]
NOTE: years and months are mutually-exclusive with the other units.
See getEndTime() and _aggregate() for more details.
Example1: years=1, months=6,
Example2: hours=1, minutes=30,
If none of the period fields are specified or if all that are specified
have values of 0, then aggregation will be suppressed, and the given
inputFile parameter value will be returned.
inputFilename: filename of the input dataset within examples/prediction/data
outputFilename: name for the output file. If not given, a name will be
generated based on the input filename and the aggregation params
retval: Name of the generated output file. This will be the same as the input
file name if no aggregation needed to be performed
If the input file contained a time field, sequence id field or reset field
that were not specified in aggregationInfo fields, those fields will be
added automatically with the following rules:
1. The order will be R, S, T, rest of the fields
2. The aggregation function for all will be to pick the first: lambda x: x[0]
Returns: the path of the aggregated data file if aggregation was performed
(in the same directory as the given input file); if aggregation did not
need to be performed, then the given inputFile argument value is returned.
"""
# Create the input stream
inputFullPath = resource_filename("nupic.datafiles", inputFilename)
inputObj = FileRecordStream(inputFullPath)
# Instantiate the aggregator
aggregator = Aggregator(aggregationInfo=aggregationInfo,
inputFields=inputObj.getFields())
# Is it a null aggregation? If so, just return the input file unmodified
if aggregator.isNullAggregation():
return inputFullPath
# ------------------------------------------------------------------------
# If we were not given an output filename, create one based on the
# aggregation settings
if outputFilename is None:
outputFilename = 'agg_%s' % \
os.path.splitext(os.path.basename(inputFullPath))[0]
timePeriods = 'years months weeks days '\
'hours minutes seconds milliseconds microseconds'
for k in timePeriods.split():
if aggregationInfo.get(k, 0) > 0:
outputFilename += '_%s_%d' % (k, aggregationInfo[k])
outputFilename += '.csv'
outputFilename = os.path.join(os.path.dirname(inputFullPath), outputFilename)
# ------------------------------------------------------------------------
# If some other process already started creating this file, simply
# wait for it to finish and return without doing anything
lockFilePath = outputFilename + '.please_wait'
if os.path.isfile(outputFilename) or \
os.path.isfile(lockFilePath):
while os.path.isfile(lockFilePath):
print 'Waiting for %s to be fully written by another process' % \
lockFilePath
time.sleep(1)
return outputFilename
# Create the lock file
lockFD = open(lockFilePath, 'w')
# -------------------------------------------------------------------------
# Create the output stream
outputObj = FileRecordStream(streamID=outputFilename, write=True,
fields=inputObj.getFields())
# -------------------------------------------------------------------------
# Write all aggregated records to the output
while True:
inRecord = inputObj.getNextRecord()
(aggRecord, aggBookmark) = aggregator.next(inRecord, None)
if aggRecord is None and inRecord is None:
break
if aggRecord is not None:
outputObj.appendRecord(aggRecord)
return outputFilename
def getFilename(aggregationInfo, inputFile):
"""Generate the filename for aggregated dataset
The filename is based on the input filename and the
aggregation period.
Returns the inputFile if no aggregation required (aggregation
info has all 0's)
"""
# Find the actual file, with an absolute path
inputFile = resource_filename("nupic.datafiles", inputFile)
a = defaultdict(lambda: 0, aggregationInfo)
outputDir = os.path.dirname(inputFile)
outputFile = 'agg_%s' % os.path.splitext(os.path.basename(inputFile))[0]
noAggregation = True
timePeriods = 'years months weeks days '\
'hours minutes seconds milliseconds microseconds'
for k in timePeriods.split():
if a[k] > 0:
noAggregation = False
outputFile += '_%s_%d' % (k, a[k])
if noAggregation:
return inputFile
outputFile += '.csv'
outputFile = os.path.join(outputDir, outputFile)
return outputFile
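# Example (illustrative): with aggregationInfo = {'hours': 1, 'minutes': 30}
# and inputFile 'gym.csv', this returns a path ending in
# 'agg_gym_hours_1_minutes_30.csv' in the same directory as the resolved
# input file.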
| agpl-3.0 |
kod3r/keras | examples/imdb_cnn.py | 76 | 2878 | from __future__ import absolute_import
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.optimizers import RMSprop
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Convolution1D, MaxPooling1D
from keras.datasets import imdb
'''
This example demonstrates the use of Convolution1D
for text classification.
Run on GPU: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python imdb_cnn.py
Get to 0.8330 test accuracy after 3 epochs. 100s/epoch on K520 GPU.
'''
# set parameters:
max_features = 5000
maxlen = 100
batch_size = 32
embedding_dims = 100
nb_filters = 250
filter_length = 3
hidden_dims = 250
nb_epoch = 3
print("Loading data...")
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features,
test_split=0.2)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print("Pad sequences (samples x time)")
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features, embedding_dims))
model.add(Dropout(0.25))
# we add a Convolution1D, which will learn nb_filters
# word group filters of size filter_length:
model.add(Convolution1D(input_dim=embedding_dims,
nb_filter=nb_filters,
filter_length=filter_length,
border_mode="valid",
activation="relu",
subsample_length=1))
# we use standard max pooling (halving the output of the previous layer):
model.add(MaxPooling1D(pool_length=2))
# We flatten the output of the conv layer, so that we can add a vanilla dense layer:
model.add(Flatten())
# Computing the output shape of a conv layer can be tricky;
# for a good tutorial, see: http://cs231n.github.io/convolutional-networks/
output_size = nb_filters * (((maxlen - filter_length) / 1) + 1) / 2
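# Sanity check with the parameters above (illustrative): the conv output
# length is ((100 - 3) / 1) + 1 = 98, max pooling halves it to 49, and
# flattening 250 filters gives 250 * 49 = 12250 units into the dense layer.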
# We add a vanilla hidden layer:
model.add(Dense(output_size, hidden_dims))
model.add(Dropout(0.25))
model.add(Activation('relu'))
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(hidden_dims, 1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', class_mode="binary")
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, validation_data=(X_test, y_test))
| mit |
rahuldhote/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 252 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two South American
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
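        # The coverage rows are ordered north-to-south while ygrid ascends,
        # so the row index is negated here; this appears to mirror the
        # ygrid[::-1] flip used when building the plotting mesh below.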
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
BlazeLoader/BlazeLoader | util/apply_at.py | 3 | 2819 | """
This is a temporary and messy way to apply the AccessTransformer;
the installer will take care of it when it is ready.
"""
import sys
import os
import subprocess
import shlex
def get_cp_sep():
os_name = sys.platform
if os_name.startswith('win'):
return ';'
return ':'
mc_ver = '1.7.2'
mcp_dir = '../'
jars_dir = os.path.join(mcp_dir, 'jars')
libraries_dir = os.path.join(jars_dir, 'libraries')
versions_dir = os.path.join(jars_dir, 'versions', '%s' % mc_ver)
cp_sep = get_cp_sep()
libs = '../jars/libraries/net/minecraft/launchwrapper/1.9/launchwrapper-1.9.jar;../jars/libraries/org/ow2/asm/asm-debug-all/4.1/asm-debug-all-4.1.jar;./res/bl_at.cfg'.replace('/', os.sep)
libs = libs.replace(';', cp_sep)
at = 'net/acomputerdog/BlazeLoader/transformers/BLAccessTransformer'.replace('/', os.sep)
jar_target = '../jars/versions/"{mc_version}"/"{mc_version}".jar'.format(mc_version = mc_ver)
compile_cmd = 'javac -cp' + ' ' + libs + ' ' + '-d bin ' + 'src/' + at + '.java'
run_cmd = 'java -cp bin;'.replace(';', cp_sep) + libs + ' ' + at.replace(os.sep, '.') + ' ' + jar_target + ' ' + './res/bl_at.cfg'
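# For reference (illustrative, assuming a Linux host where cp_sep is ':'),
# run_cmd expands to roughly:
#   java -cp bin:<launchwrapper-1.9.jar>:<asm-debug-all-4.1.jar>:./res/bl_at.cfg \
#       net.acomputerdog.BlazeLoader.transformers.BLAccessTransformer \
#       ../jars/versions/"1.7.2"/"1.7.2".jar ./res/bl_at.cfg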
def check_install():
print '> Checking installation'
if not os.path.isfile(os.path.join(versions_dir, '%s.jar' % mc_ver)):
print '!!!! Minecraft %s jar not found!' % mc_ver
return False
if not os.path.isfile(os.path.join(versions_dir, '%s.json' % mc_ver)):
print '!!!! Minecraft %s json not found!' % mc_ver
return False
if not os.path.isfile(os.path.join(libraries_dir, 'org', 'ow2', 'asm', 'asm-debug-all', '4.1', 'asm-debug-all-4.1.jar')):
print '!!!! asm-debug-all-4.1.jar not found! get it at http://mvnrepository.com/artifact/org.ow2.asm/asm-debug-all/4.1'
return False
if not os.path.isfile(os.path.join(libraries_dir, 'net', 'minecraft', 'launchwrapper', '1.9', 'launchwrapper-1.9.jar')):
print '!!!! launchwrapper-1.9 not found!'
return False
if not os.path.exists('bin') or not os.path.isdir('bin'):
os.makedirs('bin')
    print '> Found all required files'
return True
def compile_at():
print '> Compiling AccessTransformer'
process = subprocess.Popen(compile_cmd, shell=True, stdout=subprocess.PIPE)
while True:
nextline = process.stdout.readline()
        if nextline == '' and process.poll() is not None:
break
if nextline != '':
print nextline
print '> Compiled AccessTransformer'
def run_at():
print '> Running AccessTransformer'
process = subprocess.Popen(run_cmd, shell=True, stdout=subprocess.PIPE)
while True:
nextline = process.stdout.readline()
        if nextline == '' and process.poll() is not None:
break
if nextline != '':
print nextline
print '> AccessTransformer was successfully applied'
if not check_install():
sys.exit(1)
compile_at()
run_at() | bsd-2-clause |
ron1818/Singaboat_RobotX2016 | robotx_nav/nodes/task1_nonprocess_1.py | 3 | 5759 | #!/usr/bin/env python
import rospy
import math
import time
import numpy as np
import os
import tf
from sklearn.cluster import KMeans
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point, Pose, Twist, Vector3, Quaternion
from visualization_msgs.msg import MarkerArray, Marker
from move_base_forward import Forward
from move_base_force_cancel import ForceCancel
from tf.transformations import euler_from_quaternion
from move_base_waypoint import MoveTo
from nav_msgs.msg import Odometry
class PassGates(object):
MAX_DATA=30 #stash data size of marker array for clustering
mid_point_counter_max=20
distance=20 #distance to offset from center of gates
replan_min=5 #replan waypoints if changes is more than this
x0, y0, yaw0= 0, 0, 0
markers_array=MarkerArray()
red_totem=np.zeros((MAX_DATA, 2)) #unordered list
green_totem=np.zeros((MAX_DATA, 2))
red_centers=np.zeros((1, 2)) #ordered list of centers x, y
green_centers=np.zeros((1, 2))
red_counter=0
green_counter=0
mid_point_counter=0
mid_point_x=list()
mid_point_y=list()
termination_displacement=50
m=0
d=10
def __init__(self):
print("starting task 1")
rospy.init_node('task_1', anonymous=True)
rospy.Subscriber("/filtered_marker_array", MarkerArray, self.marker_callback, queue_size = 50)
self.marker_pub= rospy.Publisher('waypoint_markers', Marker, queue_size=5)
self.cmd_vel_pub = rospy.Publisher('move_base_cmd_vel', Twist, queue_size=5)
self.base_frame = rospy.get_param("~base_frame", "base_link")
self.fixed_frame = rospy.get_param("~fixed_frame", "map")
# tf_listener
self.tf_listener = tf.TransformListener()
self.odom_received = False
rospy.wait_for_message("/odometry/filtered/global", Odometry)
rospy.Subscriber("/odometry/filtered/global", Odometry, self.odom_callback, queue_size=50)
while not self.odom_received:
rospy.sleep(1)
self.moveto_obj = MoveTo("moveto", is_newnode=False, target=None, mode=1, mode_param=1, is_relative=False)
init_position =np.array([self.x0, self.y0, 0])
while(self.red_counter<self.MAX_DATA or self.green_counter<self.MAX_DATA):
#wait for data bucket to fill up, rotate around
self.rotation(math.pi/4)
time.sleep(3)
self.rotation(-math.pi/2)
time.sleep(1)
print("bucket is full")
while not rospy.is_shutdown():
#go to mid target
if self.mid_point_counter<self.mid_point_counter_max: #at first move to mid point
self.move_to_goal([self.mid_point_x[0], self.mid_point_y[0], math.pi/2+math.atan2(self.red_centers[0][1]-self.green_centers[0][1],self.red_centers[0][0]-self.green_centers[0][0])])
            else:  # later, go along the direction of the fitted midpoint line
angle=math.atan(self.m)
self.move_to_goal([self.x0+self.d*math.sin(angle), self.y0+self.d*math.cos(angle), angle])
#pass all of the pool, stop
if self.euclid_distance(np.array([self.x0, self.y0, 0]), init_position)>self.termination_displacement:
print("Task 1 completed")
break
def marker_callback(self, msg):
if len(msg.markers)>0:
for i in range(len(msg.markers)):
if msg.markers[i].type == 3:
#may append more than 1 markers
if msg.markers[i].id == 0:
self.red_totem[self.red_counter%self.MAX_DATA]=[msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]
self.red_counter+=1
elif msg.markers[i].id == 1:
self.green_totem[self.green_counter%self.MAX_DATA]=[msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]
self.green_counter+=1
else:
pass
#list is full
if (self.red_counter>self.MAX_DATA):
red_kmeans = KMeans(n_clusters=1).fit(self.red_totem)
self.red_centers=red_kmeans.cluster_centers_
if(self.green_counter>self.MAX_DATA):
green_kmeans = KMeans(n_clusters=1).fit(self.green_totem)
self.green_centers=green_kmeans.cluster_centers_
#updating the mid point
if(self.red_counter>self.MAX_DATA and self.green_counter>self.MAX_DATA):
self.mid_point_x.append((self.red_centers[0][0]+self.green_centers[0][0])/2)
self.mid_point_y.append((self.red_centers[0][1]+self.green_centers[0][1])/2)
self.mid_point_counter+=1
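            # fit the line y = m*x + b through all accumulated midpoints
            # (note: np.polyfit needs at least two points for a degree-1 fit)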
self.m, self.b = np.polyfit(self.mid_point_x, self.mid_point_y, 1)
def move_to_goal(self, goal):
print("move to point")
self.moveto_obj.respawn(goal, )
def euclid_distance(self, target1, target2):
return math.sqrt((target1[0]-target2[0])**2+(target1[1]-target2[1])**2)
def rotation(self, ang):
rate = rospy.Rate(10)
an_vel = 0.1
duration = ang / an_vel
msg = Twist(Vector3(0.0, 0.0, 0.0), Vector3(0.0, 0.0, an_vel))
rate.sleep()
start_time = rospy.get_time()
while not rospy.is_shutdown():
current_time = rospy.get_time()
if (current_time - start_time) > duration:
self.cmd_vel_pub.publish(Twist(Vector3(0, 0.0, 0.0), Vector3(0.0, 0.0, -2 * an_vel)))
self.cmd_vel_pub.publish(Twist())
break
else:
self.cmd_vel_pub.publish(msg)
rate.sleep()
def get_tf(self, fixed_frame, base_frame):
""" transform from base_link to map """
trans_received = False
while not trans_received:
try:
(trans, rot) = self.tf_listener.lookupTransform(fixed_frame,
base_frame,
rospy.Time(0))
trans_received = True
return (Point(*trans), Quaternion(*rot))
except (tf.LookupException,
tf.ConnectivityException,
tf.ExtrapolationException):
pass
def odom_callback(self, msg):
trans, rot = self.get_tf("map", "base_link")
self.x0 = trans.x
self.y0 = trans.y
_, _, self.yaw0 = euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))
self.odom_received = True
if __name__ == '__main__':
try:
PassGates()
# stage 1: gps
except rospy.ROSInterruptException:
rospy.loginfo("Task 1 Finished") | gpl-3.0 |
mcgachey/edx-platform | lms/djangoapps/course_blocks/transformers/tests/test_user_partitions.py | 26 | 17615 | # pylint: disable=attribute-defined-outside-init, protected-access
"""
Tests for UserPartitionTransformer.
"""
from collections import namedtuple
import ddt
from openedx.core.djangoapps.course_groups.partition_scheme import CohortPartitionScheme
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory, config_course_cohorts
from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort
from openedx.core.djangoapps.course_groups.views import link_cohort_to_partition_group
from student.tests.factories import CourseEnrollmentFactory
from xmodule.partitions.partitions import Group, UserPartition
from xmodule.modulestore.tests.factories import CourseFactory
from ...api import get_course_blocks
from ..user_partitions import UserPartitionTransformer, _MergedGroupAccess
from .test_helpers import CourseStructureTestCase, update_block
class UserPartitionTestMixin(object):
"""
Helper Mixin for testing user partitions.
"""
def setup_groups_partitions(self, num_user_partitions=1, num_groups=4):
"""
Sets up groups and user partitions for testing.
"""
# Set up groups
self.groups = []
for group_num in range(1, num_groups + 1):
self.groups.append(Group(group_num, 'Group ' + unicode(group_num)))
# Set up user partitions
self.user_partitions = []
for user_partition_num in range(1, num_user_partitions + 1):
user_partition = UserPartition(
id=user_partition_num,
name='Partition ' + unicode(user_partition_num),
description='This is partition ' + unicode(user_partition_num),
groups=self.groups,
scheme=CohortPartitionScheme
)
user_partition.scheme.name = "cohort"
self.user_partitions.append(user_partition)
def setup_cohorts(self, course):
"""
Sets up a cohort for each previously created user partition.
"""
config_course_cohorts(course, is_cohorted=True)
self.partition_cohorts = []
for user_partition in self.user_partitions:
partition_cohorts = []
for group in self.groups:
cohort = CohortFactory(course_id=course.id)
partition_cohorts.append(cohort)
link_cohort_to_partition_group(
cohort,
user_partition.id,
group.id,
)
self.partition_cohorts.append(partition_cohorts)
@ddt.ddt
class UserPartitionTransformerTestCase(UserPartitionTestMixin, CourseStructureTestCase):
"""
UserPartitionTransformer Test
"""
def setUp(self):
"""
Setup course structure and create user for user partition
transformer test.
"""
super(UserPartitionTransformerTestCase, self).setUp()
# Set up user partitions and groups.
self.setup_groups_partitions()
self.user_partition = self.user_partitions[0]
# Build course.
self.course_hierarchy = self.get_course_hierarchy()
self.blocks = self.build_course(self.course_hierarchy)
self.course = self.blocks['course']
# Enroll user in course.
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id, is_active=True)
# Set up cohorts.
self.setup_cohorts(self.course)
self.transformer = UserPartitionTransformer()
def get_course_hierarchy(self):
"""
Returns a course hierarchy to test with.
"""
# course
# / \
# / \
# A[1, 2, 3] B
# / | \ |
# / | \ |
# / | \ |
# C[1, 2] D[2, 3] E /
# / | \ | / \ /
# / | \ | / \ /
# / | \ | / \ /
# F G[1] H[2] I J K[4] /
# / \ / / \ /
# / \ / / \ /
# / \ / / \/
# L[1, 2] M[1, 2, 3] N O
#
return [
{
'org': 'UserPartitionTransformer',
'course': 'UP101F',
'run': 'test_run',
'user_partitions': [self.user_partition],
'#type': 'course',
'#ref': 'course',
'#children': [
{
'#type': 'vertical',
'#ref': 'A',
'metadata': {'group_access': {self.user_partition.id: [0, 1, 2, 3]}},
},
{'#type': 'vertical', '#ref': 'B'},
],
},
{
'#type': 'vertical',
'#ref': 'C',
'#parents': ['A'],
'metadata': {'group_access': {self.user_partition.id: [1, 2]}},
'#children': [
{'#type': 'vertical', '#ref': 'F'},
{
'#type': 'vertical',
'#ref': 'G',
'metadata': {'group_access': {self.user_partition.id: [1]}},
},
{
'#type': 'vertical',
'#ref': 'H',
'metadata': {'group_access': {self.user_partition.id: [2]}},
},
],
},
{
'#type': 'vertical',
'#ref': 'D',
'#parents': ['A'],
'metadata': {'group_access': {self.user_partition.id: [2, 3]}},
'#children': [{'#type': 'vertical', '#ref': 'I'}],
},
{
'#type': 'vertical',
'#ref': 'E',
'#parents': ['A'],
'#children': [{'#type': 'vertical', '#ref': 'J'}],
},
{
'#type': 'vertical',
'#ref': 'K',
'#parents': ['E'],
'metadata': {'group_access': {self.user_partition.id: [4]}},
'#children': [{'#type': 'vertical', '#ref': 'N'}],
},
{
'#type': 'vertical',
'#ref': 'L',
'#parents': ['G'],
'metadata': {'group_access': {self.user_partition.id: [1, 2]}},
},
{
'#type': 'vertical',
'#ref': 'M',
'#parents': ['G', 'H'],
'metadata': {'group_access': {self.user_partition.id: [1, 2, 3]}},
},
{
'#type': 'vertical',
'#ref': 'O',
'#parents': ['K', 'B'],
},
]
@ddt.data(
(None, ('course', 'B', 'O')),
(1, ('course', 'A', 'B', 'C', 'E', 'F', 'G', 'J', 'L', 'M', 'O')),
(2, ('course', 'A', 'B', 'C', 'D', 'E', 'F', 'H', 'I', 'J', 'M', 'O')),
(3, ('course', 'A', 'B', 'D', 'E', 'I', 'J', 'O')),
(4, ('course', 'B', 'O')),
)
@ddt.unpack
def test_transform(self, group_id, expected_blocks):
if group_id:
cohort = self.partition_cohorts[self.user_partition.id - 1][group_id - 1]
add_user_to_cohort(cohort, self.user.username)
trans_block_structure = get_course_blocks(
self.user,
self.course.location,
transformers={self.transformer}
)
self.assertSetEqual(
set(trans_block_structure.get_block_keys()),
self.get_block_key_set(self.blocks, *expected_blocks)
)
@ddt.ddt
class MergedGroupAccessTestData(UserPartitionTestMixin, CourseStructureTestCase):
"""
_MergedGroupAccess Test
"""
def setUp(self):
"""
Setup course structure and create user for user partition
transformer test.
"""
super(MergedGroupAccessTestData, self).setUp()
# Set up multiple user partitions and groups.
self.setup_groups_partitions(num_user_partitions=3)
self.course = CourseFactory.create(user_partitions=self.user_partitions)
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id, is_active=True)
# Set up cohorts.
self.setup_cohorts(self.course)
def get_course_hierarchy(self):
"""
Returns a course hierarchy to test with.
"""
# The block tree is as follows, with the numbers in the brackets
# specifying the group_id for each of the 3 partitions.
# A
# / | \
# / | \
# B C D
# [1][3][] [2][2][] [3][1][]
# \ /
# \ /
# E
#
return [
{
'org': 'MergedGroupAccess',
'course': 'MGA101F',
'run': 'test_run',
'user_partitions': self.user_partitions,
'#type': 'course',
'#ref': 'A',
},
{
'#type': 'vertical',
'#ref': 'B',
'#parents': ['A'],
                'metadata': {'group_access': {1: [1], 2: [3], 3: []}},
},
{
'#type': 'vertical',
'#ref': 'C',
'#parents': ['A'],
                'metadata': {'group_access': {1: [2], 2: [2], 3: []}},
},
{
'#type': 'vertical',
'#ref': 'D',
'#parents': ['A'],
                'metadata': {'group_access': {1: [3], 2: [1], 3: []}},
},
{
'#type': 'vertical',
'#ref': 'E',
'#parents': ['B', 'C'],
},
]
AccessTestData = namedtuple(
'AccessTestData',
['partition_groups', 'xblock_access', 'merged_parents_list', 'expected_access'],
)
AccessTestData.__new__.__defaults__ = ({}, None, [], False)
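    # Illustrative reading of the defaults above: AccessTestData() is
    # equivalent to AccessTestData(partition_groups={}, xblock_access=None,
    # merged_parents_list=[], expected_access=False).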
@ddt.data(
# universal access throughout
AccessTestData(expected_access=True),
AccessTestData(xblock_access={1: None}, expected_access=True),
AccessTestData(xblock_access={1: []}, expected_access=True),
# partition 1 requiring membership in group 1
AccessTestData(xblock_access={1: [1]}),
AccessTestData(partition_groups={2: 1, 3: 1}, xblock_access={1: [1]}),
AccessTestData(partition_groups={1: 1, 2: 1, 3: 1}, xblock_access={1: [1]}, expected_access=True),
AccessTestData(partition_groups={1: 1, 2: 1}, xblock_access={1: [1], 2: [], 3: []}, expected_access=True),
# partitions 1 and 2 requiring membership in group 1
AccessTestData(xblock_access={1: [1], 2: [1]}),
AccessTestData(partition_groups={2: 1, 3: 1}, xblock_access={1: [1], 2: [1]}),
AccessTestData(partition_groups={1: 1, 2: 1}, xblock_access={1: [1], 2: [1]}, expected_access=True),
# partitions 1 and 2 requiring membership in different groups
AccessTestData(xblock_access={1: [1], 2: [2]}),
AccessTestData(partition_groups={2: 1, 3: 1}, xblock_access={1: [1], 2: [2]}),
AccessTestData(partition_groups={1: 1, 2: 1, 3: 1}, xblock_access={1: [1], 2: [2]}),
AccessTestData(partition_groups={1: 1, 2: 2}, xblock_access={1: [1], 2: [2]}, expected_access=True),
# partitions 1 and 2 requiring membership in list of groups
AccessTestData(partition_groups={1: 3, 2: 3}, xblock_access={1: [1, 2], 2: [1, 2]}),
AccessTestData(partition_groups={1: 1, 2: 1}, xblock_access={1: [1, 2], 2: [1, 2]}, expected_access=True),
AccessTestData(partition_groups={1: 1, 2: 2}, xblock_access={1: [1, 2], 2: [1, 2]}, expected_access=True),
AccessTestData(partition_groups={1: 2, 2: 1}, xblock_access={1: [1, 2], 2: [1, 2]}, expected_access=True),
AccessTestData(partition_groups={1: 2, 2: 2}, xblock_access={1: [1, 2], 2: [1, 2]}, expected_access=True),
# parent inheritance
# 1 parent allows
AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {1}}], expected_access=True),
# 2 parents allow
AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {1}}, {1: {1}}], expected_access=True),
AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {2}}, {1: {1}}], expected_access=True),
AccessTestData(
partition_groups={1: 1, 2: 2},
merged_parents_list=[{1: {2}, 2: {2}}, {1: {1}, 2: {1}}],
expected_access=True,
),
# 1 parent denies
AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {}}]),
AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {3}}]),
# 1 parent denies, 1 parent allows all
AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {}}, {}], expected_access=True),
AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {}}, {1: {}}, {}], expected_access=True),
AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {}}, {}, {1: {}}], expected_access=True),
# 1 parent denies, 1 parent allows
AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {3}}, {1: {1}}], expected_access=True),
# 2 parents deny
AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {}}, {1: {}}]),
AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {3}}, {1: {3}, 2: {2}}]),
# intersect with parent
# child denies, 1 parent allows
AccessTestData(partition_groups={1: 1, 2: 2}, xblock_access={1: [3]}, merged_parents_list=[{1: {1}}]),
AccessTestData(partition_groups={1: 1, 2: 2}, xblock_access={1: [2]}, merged_parents_list=[{1: {1}}]),
# child denies, 2 parents allow
AccessTestData(partition_groups={1: 1, 2: 2}, xblock_access={1: [3]}, merged_parents_list=[{1: {1}}, {2: {2}}]),
AccessTestData(partition_groups={1: 1, 2: 2}, xblock_access={2: [3]}, merged_parents_list=[{1: {1}}, {2: {2}}]),
# child allows, 1 parent denies
AccessTestData(partition_groups={1: 1, 2: 2}, xblock_access={2: [2]}, merged_parents_list=[{1: {}}]),
AccessTestData(partition_groups={1: 1, 2: 2}, xblock_access={1: [1]}, merged_parents_list=[{1: {2}}]),
AccessTestData(partition_groups={1: 1, 2: 2}, xblock_access={2: [2]}, merged_parents_list=[{1: {2}}]),
# child allows, 1 parent allows
AccessTestData(
partition_groups={1: 1, 2: 2},
xblock_access={1: [1]},
merged_parents_list=[{}],
expected_access=True,
),
AccessTestData(
partition_groups={1: 1, 2: 2}, xblock_access={2: [2]}, merged_parents_list=[{1: {1}}], expected_access=True
),
AccessTestData(
partition_groups={1: 1, 2: 2},
xblock_access={1: [1, 3], 2: [2, 3]},
merged_parents_list=[{1: {1, 2, 3}}, {2: {1, 2, 3}}],
expected_access=True,
),
# child allows, 1 parent allows, 1 parent denies
AccessTestData(
partition_groups={1: 1, 2: 2},
xblock_access={1: [1]},
merged_parents_list=[{1: {3}}, {1: {1}}],
expected_access=True,
),
)
@ddt.unpack
def test_merged_group_access(self, user_partition_groups, xblock_access, merged_parents_list, expected_access):
# use the course as the block to test
block = self.course
# update block access
if xblock_access is not None:
block.group_access = xblock_access
update_block(self.course)
# convert merged_parents_list to _MergedGroupAccess objects
for ind, merged_parent in enumerate(merged_parents_list):
converted_object = _MergedGroupAccess([], block, [])
converted_object._access = merged_parent
merged_parents_list[ind] = converted_object
merged_group_access = _MergedGroupAccess(self.user_partitions, block, merged_parents_list)
# convert group_id to groups in user_partition_groups parameter
for partition_id, group_id in user_partition_groups.iteritems():
user_partition_groups[partition_id] = self.groups[group_id - 1]
self.assertEquals(
merged_group_access.check_group_access(user_partition_groups),
expected_access,
)
@ddt.data(
([None], None),
([{1}, None], {1}),
([None, {1}], {1}),
([None, {1}, {1, 2}], {1}),
([None, {1, 2}, {1, 2}], {1, 2}),
([{1, 2, 3}, {1, 2}, None], {1, 2}),
([{1, 2}, {1, 2, 3, 4}, None], {1, 2}),
([{1}, {2}, None], set()),
([None, {1}, {2}, None], set()),
)
@ddt.unpack
def test_intersection_method(self, input_value, expected_result):
self.assertEquals(
_MergedGroupAccess._intersection(*input_value),
expected_result,
)
| agpl-3.0 |
vitaly-krugl/nupic | src/nupic/frameworks/opf/experiment_runner.py | 10 | 29504 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This script provides the runExperiment() API function that is used
by the command-line client run_opf_experiment.py of the Online Prediction
Framework (OPF). It executes a single experiment.
This runner is generally run through `scripts/run_opf_experiment.py`.
"""
from collections import namedtuple
import itertools
import logging
import optparse
import os
import sys
import random
import numpy
from nupic.data import json_helpers
from nupic.frameworks.opf import opf_basic_environment, helpers
from nupic.frameworks.opf.exp_description_api import OpfEnvironment
from nupic.frameworks.opf.model_factory import ModelFactory
from nupic.frameworks.opf.opf_task_driver import OPFTaskDriver
from nupic.frameworks.opf.opf_utils import (InferenceElement, matchPatterns,
validateOpfJsonValue)
from nupic.support import initLogging
g_defaultCheckpointExtension = ".nta"
# Schema of the Private Command-line Options dictionary returned by
# _parseCommandLineOptions(). This "Private" options dict is consumed internally
# by runExperiment (i.e. not passed to external modules).
g_parsedPrivateCommandLineOptionsSchema = {
"description":"OPF RunExperiment control args",
"type":"object",
"additionalProperties":False,
"properties":{
"createCheckpointName":{
"description":"Create a model and save it under the checkpoint name, " + \
"but don't run it. " + \
"TODO: 'blank' is a non-standard JSON schema setting; " + \
"validictory 8.0 supports a blank_by_default arg.",
"required":True,
"type":"string",
"minLength":0,
"blank":True
},
"listAvailableCheckpoints":{
"description":"List all checkpoints and exit",
"required":True,
"type":"boolean"
},
"listTasks":{
"description":"List all tasks and exit",
"required":True,
"type":"boolean"
},
"runCheckpointName":{
"description":"Name of saved checkpoint to load and run" + \
"TODO: 'blank' is a non-standard JSON schema setting; " + \
"validictory 8.0 supports a blank_by_default arg.",
"required":True,
"type":"string",
"minLength":0,
"blank":True
},
"newSerialization":{
"description":"Use new capnproto serialization.",
"required":True,
"type":"boolean"
},
#"reuseDatasets":{
# "description":"Keep existing generated/aggregated datasets",
# "required":True,
# "type":"boolean"
#},
"testMode":{
"description":"True to override iteration counts with very small values",
"required":True,
"type":"boolean"
},
"taskLabels":{
"required":False,
"type":"array",
"uniqueItems":False,
"minItems":0,
"items":{"type":"string", "minLength":1}
},
"checkpointModel":{
"description":"True to checkpoint model after running each task",
"required":True,
"type":"boolean"
},
}
}
def runExperiment(args, model=None):
"""
Run a single OPF experiment.
.. note:: The caller is responsible for initializing python logging before
calling this function (e.g., import :mod:`nupic.support`;
:meth:`nupic.support.initLogging`)
See also: :meth:`.initExperimentPrng`.
:param args: (string) Experiment command-line args list. Too see all options,
run with ``--help``:
.. code-block:: text
Options:
-h, --help show this help message and exit
-c <CHECKPOINT> Create a model and save it under the given <CHECKPOINT>
name, but don't run it
--listCheckpoints List all available checkpoints
--listTasks List all task labels in description.py
--load=<CHECKPOINT> Load a model from the given <CHECKPOINT> and run it.
Run with --listCheckpoints flag for more details.
--newSerialization Use new capnproto serialization
--tasks Run the tasks with the given TASK LABELS in the order
they are given. Either end of arg-list, or a
standalone dot ('.') arg or the next short or long
option name (-a or --blah) terminates the list. NOTE:
FAILS TO RECOGNIZE task label names with one or more
leading dashes. [default: run all of the tasks in
description.py]
--testMode Reduce iteration count for testing
--noCheckpoint Don't checkpoint the model after running each task.
:param model: (:class:`~nupic.frameworks.opf.model.Model`) For testing, may
pass in an existing OPF Model to use instead of creating a new one.
:returns: (:class:`~nupic.frameworks.opf.model.Model`)
reference to OPF Model instance that was constructed (this
is provided to aid with debugging) or None, if none was
created.
"""
# Parse command-line options
opt = _parseCommandLineOptions(args)
#print "runExperiment: Parsed Command Options: ", opt
model = _runExperimentImpl(opt, model)
return model
def initExperimentPrng():
"""Initialize PRNGs that may be used by other modules in the experiment stack.
  .. note:: The user may call this function to initialize the PRNGs that are
    used by the experiment stack before calling runExperiment(), unless the
    user has their own logic for initializing these PRNGs.
"""
seed = 42
random.seed(seed)
numpy.random.seed(seed)
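# Illustrative call sequence (the experiment directory path is hypothetical):
#
#   initLogging(verbose=True)
#   initExperimentPrng()
#   model = runExperiment(['--testMode', '/path/to/experimentDir'])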
ParseCommandLineOptionsResult = namedtuple('ParseCommandLineOptionsResult',
('experimentDir', 'privateOptions'))
"""_parseCommandLineOptions() return value type
Args:
experimentDir: path of experiment directory that contains description.py
privateOptions: dictionary of options of consumption only by this script;
the schema is described by g_parsedPrivateCommandLineOptionsSchema
"""
def _parseCommandLineOptions(args):
"""Parse command line options
Args:
args: command line arguments (not including sys.argv[0])
Returns:
namedtuple ParseCommandLineOptionsResult
"""
usageStr = (
"%prog [options] descriptionPyDirectory\n"
"This script runs a single OPF Model described by description.py "
"located in the given directory."
)
parser = optparse.OptionParser(usage=usageStr)
parser.add_option("-c",
help="Create a model and save it under the given "
"<CHECKPOINT> name, but don't run it",
dest="createCheckpointName",
action="store", type="string", default="",
metavar="<CHECKPOINT>")
parser.add_option("--listCheckpoints",
help="List all available checkpoints",
dest="listAvailableCheckpoints",
action="store_true", default=False)
parser.add_option("--listTasks",
help="List all task labels in description.py",
dest="listTasks",
action="store_true", default=False)
parser.add_option("--load",
help="Load a model from the given <CHECKPOINT> and run it. "
"Run with --listCheckpoints flag for more details. ",
dest="runCheckpointName",
action="store", type="string", default="",
metavar="<CHECKPOINT>")
parser.add_option("--newSerialization",
help="Use new capnproto serialization",
dest="newSerialization",
action="store_true", default=False)
#parser.add_option("--reuseDatasets",
# help="Keep existing generated/aggregated datasets",
# dest="reuseDatasets", action="store_true",
# default=False)
parser.add_option("--tasks",
help="Run the tasks with the given TASK LABELS "
"in the order they are given. Either end of "
"arg-list, or a standalone dot ('.') arg or "
"the next short or long option name (-a or "
"--blah) terminates the list. NOTE: FAILS "
"TO RECOGNIZE task label names with one or more "
"leading dashes. [default: run all of the tasks in "
"description.py]",
dest="taskLabels", default=[],
action="callback", callback=reapVarArgsCallback,
metavar="TASK_LABELS")
parser.add_option("--testMode",
help="Reduce iteration count for testing",
dest="testMode", action="store_true",
default=False)
parser.add_option("--noCheckpoint",
help="Don't checkpoint the model after running each task.",
dest="checkpointModel", action="store_false",
default=True)
options, experiments = parser.parse_args(args)
# Validate args
mutuallyExclusiveOptionCount = sum([bool(options.createCheckpointName),
options.listAvailableCheckpoints,
options.listTasks,
bool(options.runCheckpointName)])
if mutuallyExclusiveOptionCount > 1:
_reportCommandLineUsageErrorAndExit(
parser,
"Options: -c, --listCheckpoints, --listTasks, and --load are "
"mutually exclusive. Please select only one")
mutuallyExclusiveOptionCount = sum([bool(not options.checkpointModel),
bool(options.createCheckpointName)])
if mutuallyExclusiveOptionCount > 1:
_reportCommandLineUsageErrorAndExit(
parser,
"Options: -c and --noCheckpoint are "
"mutually exclusive. Please select only one")
if len(experiments) != 1:
_reportCommandLineUsageErrorAndExit(
parser,
"Exactly ONE experiment must be specified, but got %s (%s)" % (
len(experiments), experiments))
# Done with parser
parser.destroy()
# Prepare results
# Directory path of the experiment (that contain description.py)
experimentDir = os.path.abspath(experiments[0])
# RunExperiment.py's private options (g_parsedPrivateCommandLineOptionsSchema)
privateOptions = dict()
privateOptions['createCheckpointName'] = options.createCheckpointName
privateOptions['listAvailableCheckpoints'] = options.listAvailableCheckpoints
privateOptions['listTasks'] = options.listTasks
privateOptions['runCheckpointName'] = options.runCheckpointName
privateOptions['newSerialization'] = options.newSerialization
privateOptions['testMode'] = options.testMode
#privateOptions['reuseDatasets'] = options.reuseDatasets
privateOptions['taskLabels'] = options.taskLabels
privateOptions['checkpointModel'] = options.checkpointModel
result = ParseCommandLineOptionsResult(experimentDir=experimentDir,
privateOptions=privateOptions)
return result
def reapVarArgsCallback(option, optStr, value, parser):
"""Used as optparse callback for reaping a variable number of option args.
The option may be specified multiple times, and all the args associated with
that option name will be accumulated in the order that they are encountered
"""
newValues = []
# Reap the args, taking care to stop before the next option or '.'
gotDot = False
for arg in parser.rargs:
# Stop on --longname options
if arg.startswith("--") and len(arg) > 2:
break
# Stop on -b options
if arg.startswith("-") and len(arg) > 1:
break
if arg == ".":
gotDot = True
break
newValues.append(arg)
if not newValues:
raise optparse.OptionValueError(
("Empty arg list for option %r expecting one or more args "
"(remaining tokens: %r)") % (optStr, parser.rargs))
del parser.rargs[:len(newValues) + int(gotDot)]
# Retrieve the existing arg accumulator, if any
value = getattr(parser.values, option.dest, [])
#print "Previous value: %r" % value
if value is None:
value = []
# Append the new args to the existing ones and save to the parser
value.extend(newValues)
setattr(parser.values, option.dest, value)
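# For example, a (hypothetical) command line of
#
#   --tasks taskA taskB . --testMode
#
# accumulates ['taskA', 'taskB'] into the option's destination; the standalone
# '.' terminates the variable-length list and '--testMode' is parsed normally.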
def _reportCommandLineUsageErrorAndExit(parser, message):
"""Report usage error and exit program with error indication."""
print parser.get_usage()
print message
sys.exit(1)
def _runExperimentImpl(options, model=None):
"""Creates and runs the experiment
Args:
options: namedtuple ParseCommandLineOptionsResult
model: For testing: may pass in an existing OPF Model instance
to use instead of creating a new one.
  Returns: reference to the OPF Model instance that was constructed (this
is provided to aid with debugging) or None, if none was
created.
"""
json_helpers.validate(options.privateOptions,
schemaDict=g_parsedPrivateCommandLineOptionsSchema)
# Load the experiment's description.py module
experimentDir = options.experimentDir
descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(
experimentDir)
expIface = helpers.getExperimentDescriptionInterfaceFromModule(
descriptionPyModule)
# Handle "list checkpoints" request
if options.privateOptions['listAvailableCheckpoints']:
_printAvailableCheckpoints(experimentDir)
return None
# Load experiment tasks
experimentTasks = expIface.getModelControl().get('tasks', [])
# If the tasks list is empty, and this is a nupic environment description
# file being run from the OPF, convert it to a simple OPF description file.
if (len(experimentTasks) == 0 and
expIface.getModelControl()['environment'] == OpfEnvironment.Nupic):
expIface.convertNupicEnvToOPF()
experimentTasks = expIface.getModelControl().get('tasks', [])
# Ensures all the source locations are either absolute paths or relative to
# the nupic.datafiles package_data location.
expIface.normalizeStreamSources()
# Extract option
newSerialization = options.privateOptions['newSerialization']
# Handle listTasks
if options.privateOptions['listTasks']:
print "Available tasks:"
for label in [t['taskLabel'] for t in experimentTasks]:
print "\t", label
return None
# Construct the experiment instance
if options.privateOptions['runCheckpointName']:
assert model is None
checkpointName = options.privateOptions['runCheckpointName']
model = ModelFactory.loadFromCheckpoint(
savedModelDir=_getModelCheckpointDir(experimentDir, checkpointName),
newSerialization=newSerialization)
elif model is not None:
print "Skipping creation of OPFExperiment instance: caller provided his own"
else:
modelDescription = expIface.getModelDescription()
model = ModelFactory.create(modelDescription)
# Handle "create model" request
if options.privateOptions['createCheckpointName']:
checkpointName = options.privateOptions['createCheckpointName']
_saveModel(model=model,
experimentDir=experimentDir,
checkpointLabel=checkpointName,
newSerialization=newSerialization)
return model
# Build the task list
# Default task execution index list is in the natural list order of the tasks
taskIndexList = range(len(experimentTasks))
customTaskExecutionLabelsList = options.privateOptions['taskLabels']
if customTaskExecutionLabelsList:
taskLabelsList = [t['taskLabel'] for t in experimentTasks]
taskLabelsSet = set(taskLabelsList)
customTaskExecutionLabelsSet = set(customTaskExecutionLabelsList)
assert customTaskExecutionLabelsSet.issubset(taskLabelsSet), \
("Some custom-provided task execution labels don't correspond "
"to actual task labels: mismatched labels: %r; actual task "
"labels: %r.") % (customTaskExecutionLabelsSet - taskLabelsSet,
customTaskExecutionLabelsList)
taskIndexList = [taskLabelsList.index(label) for label in
customTaskExecutionLabelsList]
print "#### Executing custom task list: %r" % [taskLabelsList[i] for
i in taskIndexList]
# Run all experiment tasks
for taskIndex in taskIndexList:
task = experimentTasks[taskIndex]
# Create a task runner and run it!
taskRunner = _TaskRunner(model=model,
task=task,
cmdOptions=options)
taskRunner.run()
del taskRunner
if options.privateOptions['checkpointModel']:
_saveModel(model=model,
experimentDir=experimentDir,
checkpointLabel=task['taskLabel'],
newSerialization=newSerialization)
return model
def _saveModel(model, experimentDir, checkpointLabel, newSerialization=False):
"""Save model"""
checkpointDir = _getModelCheckpointDir(experimentDir, checkpointLabel)
if newSerialization:
model.writeToCheckpoint(checkpointDir)
else:
model.save(saveModelDir=checkpointDir)
def _getModelCheckpointDir(experimentDir, checkpointLabel):
"""Creates directory for serialization of the model
checkpointLabel:
Checkpoint label (string)
Returns:
absolute path to the serialization directory
"""
checkpointDir = os.path.join(getCheckpointParentDir(experimentDir),
checkpointLabel + g_defaultCheckpointExtension)
checkpointDir = os.path.abspath(checkpointDir)
return checkpointDir
def getCheckpointParentDir(experimentDir):
"""Get checkpoint parent dir.
Returns: absolute path to the base serialization directory within which
model checkpoints for this experiment are created
"""
baseDir = os.path.join(experimentDir, "savedmodels")
baseDir = os.path.abspath(baseDir)
return baseDir
def _checkpointLabelFromCheckpointDir(checkpointDir):
"""Returns a checkpoint label string for the given model checkpoint directory
checkpointDir: relative or absolute model checkpoint directory path
"""
assert checkpointDir.endswith(g_defaultCheckpointExtension)
lastSegment = os.path.split(checkpointDir)[1]
checkpointLabel = lastSegment[0:-len(g_defaultCheckpointExtension)]
return checkpointLabel
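# E.g. (hypothetical path) '/exp/savedmodels/MyCheckpoint.nta' yields the
# label 'MyCheckpoint' under the default '.nta' checkpoint extension.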
def _isCheckpointDir(checkpointDir):
"""Return true iff checkpointDir appears to be a checkpoint directory."""
lastSegment = os.path.split(checkpointDir)[1]
if lastSegment[0] == '.':
return False
if not checkpointDir.endswith(g_defaultCheckpointExtension):
return False
if not os.path.isdir(checkpointDir):
return False
return True
def _printAvailableCheckpoints(experimentDir):
"""List available checkpoints for the specified experiment."""
checkpointParentDir = getCheckpointParentDir(experimentDir)
if not os.path.exists(checkpointParentDir):
print "No available checkpoints."
return
checkpointDirs = [x for x in os.listdir(checkpointParentDir)
if _isCheckpointDir(os.path.join(checkpointParentDir, x))]
if not checkpointDirs:
print "No available checkpoints."
return
print "Available checkpoints:"
checkpointList = [_checkpointLabelFromCheckpointDir(x)
for x in checkpointDirs]
for checkpoint in sorted(checkpointList):
print "\t", checkpoint
print
print "To start from a checkpoint:"
print " python run_opf_experiment.py experiment --load <CHECKPOINT>"
print "For example, to start from the checkpoint \"MyCheckpoint\":"
print " python run_opf_experiment.py experiment --load MyCheckpoint"
class _TaskRunner(object):
"""This class is responsible for running a single experiment task on the
given Model instance
"""
__FILE_SCHEME = "file://"
def __init__(self, model, task, cmdOptions):
""" Constructor
Args:
model: The OPF Model instance against which to run the task
task: A dictionary conforming to opfTaskSchema.json
cmdOptions: ParseCommandLineOptionsResult namedtuple
"""
validateOpfJsonValue(task, "opfTaskSchema.json")
# Set up our logger
self.__logger = logging.getLogger(".".join(
['com.numenta', self.__class__.__module__, self.__class__.__name__]))
#self.__logger.setLevel(logging.DEBUG)
self.__logger.debug(("Instantiated %s(" + \
"model=%r, " + \
"task=%r, " + \
"cmdOptions=%r)") % \
(self.__class__.__name__,
model,
task,
cmdOptions))
# Generate a new dataset from streamDef and create the dataset reader
streamDef = task['dataset']
datasetReader = opf_basic_environment.BasicDatasetReader(streamDef)
self.__model = model
self.__datasetReader = datasetReader
self.__task = task
self.__cmdOptions = cmdOptions
self.__predictionLogger = opf_basic_environment.BasicPredictionLogger(
fields=model.getFieldInfo(),
experimentDir=cmdOptions.experimentDir,
label=task['taskLabel'],
inferenceType=self.__model.getInferenceType())
taskControl = task['taskControl']
# Create Task Driver
self.__taskDriver = OPFTaskDriver(
taskControl=taskControl,
model=model)
loggedMetricPatterns = taskControl.get('loggedMetrics', None)
loggedMetricLabels = matchPatterns(loggedMetricPatterns,
self.__taskDriver.getMetricLabels())
self.__predictionLogger.setLoggedMetrics(loggedMetricLabels)
# Create a prediction metrics logger
self.__metricsLogger = opf_basic_environment.BasicPredictionMetricsLogger(
experimentDir=cmdOptions.experimentDir,
label=task['taskLabel'])
def __del__(self):
"""Destructor"""
#print "IN %s.%r destructor" % (type(self), self)
def run(self):
"""Runs a single experiment task"""
self.__logger.debug("run(): Starting task <%s>", self.__task['taskLabel'])
# Set up the task
# Create our main loop-control iterator
if self.__cmdOptions.privateOptions['testMode']:
numIters = 10
else:
numIters = self.__task['iterationCount']
if numIters >= 0:
iterTracker = iter(xrange(numIters))
else:
iterTracker = iter(itertools.count())
# Initialize periodic activities
periodic = PeriodicActivityMgr(
requestedActivities=self._createPeriodicActivities())
# Reset sequence states in the model, so it starts looking for a new
# sequence
# TODO: should this be done in OPFTaskDriver.setup(), instead? Is it always
# desired in Nupic?
self.__model.resetSequenceStates()
# Have Task Driver perform its initial setup activities, including setup
# callbacks
self.__taskDriver.setup()
# Run it!
while True:
# Check controlling iterator first
try:
next(iterTracker)
except StopIteration:
break
# Read next input record
try:
inputRecord = self.__datasetReader.next()
except StopIteration:
break
# Process input record
result = self.__taskDriver.handleInputRecord(inputRecord=inputRecord)
if InferenceElement.encodings in result.inferences:
result.inferences.pop(InferenceElement.encodings)
self.__predictionLogger.writeRecord(result)
# Run periodic activities
periodic.tick()
# Dump the experiment metrics at the end of the task
self._getAndEmitExperimentMetrics(final=True)
# Have Task Driver perform its final activities
self.__taskDriver.finalize()
# Reset sequence states in the model, so it starts looking for a new
# sequence
# TODO: should this be done in OPFTaskDriver.setup(), instead? Is it always
# desired in Nupic?
self.__model.resetSequenceStates()
def _createPeriodicActivities(self):
"""Creates and returns a list of activites for this TaskRunner instance
Returns: a list of PeriodicActivityRequest elements
"""
# Initialize periodic activities
periodicActivities = []
# Metrics reporting
class MetricsReportCb(object):
def __init__(self, taskRunner):
self.__taskRunner = taskRunner
return
def __call__(self):
self.__taskRunner._getAndEmitExperimentMetrics()
reportMetrics = PeriodicActivityRequest(
repeating=True,
period=1000,
cb=MetricsReportCb(self))
periodicActivities.append(reportMetrics)
# Iteration progress
class IterationProgressCb(object):
PROGRESS_UPDATE_PERIOD_TICKS = 1000
def __init__(self, taskLabel, requestedIterationCount, logger):
self.__taskLabel = taskLabel
self.__requestedIterationCount = requestedIterationCount
self.__logger = logger
self.__numIterationsSoFar = 0
def __call__(self):
self.__numIterationsSoFar += self.PROGRESS_UPDATE_PERIOD_TICKS
self.__logger.debug("%s: ITERATION PROGRESS: %s of %s" % (
self.__taskLabel,
self.__numIterationsSoFar,
self.__requestedIterationCount))
iterationProgressCb = IterationProgressCb(
taskLabel=self.__task['taskLabel'],
requestedIterationCount=self.__task['iterationCount'],
logger=self.__logger)
iterationProgressReporter = PeriodicActivityRequest(
repeating=True,
period=IterationProgressCb.PROGRESS_UPDATE_PERIOD_TICKS,
cb=iterationProgressCb)
periodicActivities.append(iterationProgressReporter)
return periodicActivities
def _getAndEmitExperimentMetrics(self, final=False):
# Get metrics
metrics = self.__taskDriver.getMetrics()
# Emit metrics
if metrics is not None:
if final:
self.__metricsLogger.emitFinalMetrics(metrics)
else:
self.__metricsLogger.emitPeriodicMetrics(metrics)
PeriodicActivityRequest = namedtuple("PeriodicActivityRequest",
("repeating", "period", "cb"))
"""Passed as parameter to PeriodicActivityMgr
repeating: True if the activity is a repeating activity, False if one-shot
period: period of activity's execution (number of "ticks")
cb: a callable to call upon expiration of period; will be called
as cb()
"""
class PeriodicActivityMgr(object):
Activity = namedtuple("Activity",
("repeating", "period", "cb", "iteratorHolder"))
"""Activity
iteratorHolder: a list holding one iterator; we use a list so that we can
replace the iterator for repeating activities (a tuple would not
      allow it if the field was an immutable value)
"""
def __init__(self, requestedActivities):
"""
requestedActivities: a sequence of PeriodicActivityRequest elements
"""
self.__activities = []
for req in requestedActivities:
act = self.Activity(repeating=req.repeating,
period=req.period,
cb=req.cb,
iteratorHolder=[iter(xrange(req.period-1))])
self.__activities.append(act)
def tick(self):
"""Activity tick handler; services all activities
Returns:
True if controlling iterator says it's okay to keep going;
False to stop
"""
# Run activities whose time has come
for act in self.__activities:
if not act.iteratorHolder[0]:
continue
try:
next(act.iteratorHolder[0])
except StopIteration:
act.cb()
if act.repeating:
act.iteratorHolder[0] = iter(xrange(act.period-1))
else:
act.iteratorHolder[0] = None
return True
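# Illustrative use of PeriodicActivityMgr with a hypothetical callback that
# fires every 100 ticks:
#
#   def heartbeat():
#     print "heartbeat"
#   beat = PeriodicActivityRequest(repeating=True, period=100, cb=heartbeat)
#   periodic = PeriodicActivityMgr(requestedActivities=[beat])
#   for _ in xrange(1000):
#     periodic.tick()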
def main():
""" Module-level entry point. Run according to options in sys.argv
  Usage: python -m nupic.frameworks.opf.experiment_runner
"""
initLogging(verbose=True)
# Initialize pseudo-random number generators (PRNGs)
#
# This will fix the seed that is used by numpy when generating 'random'
# numbers. This allows for repeatability across experiments.
initExperimentPrng()
# Run it!
runExperiment(sys.argv[1:])
if __name__ == "__main__":
main()
| agpl-3.0 |
rahuldhote/scikit-learn | examples/applications/plot_prediction_latency.py | 233 | 11277 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
    runtimes = np.zeros(n_instances, dtype=float)
for i in range(n_instances):
instance = X_test[i, :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
    runtimes = np.zeros(n_bulk_repeats, dtype=float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
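    # Normalize each bulk runtime by the number of instances so bulk and
    # atomic latencies are comparable on a per-prediction basis.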
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
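# Illustrative standalone use (the fitted estimator and test matrix are
# hypothetical; any estimator supporting predict() works):
#
#   atomic, bulk = benchmark_estimator(fitted_estimator, X_test)
#   print("median atomic latency: %.2e s" % np.median(atomic))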
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
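    # Standardize features and targets with statistics computed on the
    # training split only, so the test split does not leak into the scaling.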
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
    configuration : dict of the benchmark configuration; its 'estimators'
        entries provide the names used to label the boxplots
pred_type : 'bulk' or 'atomic'
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
    Returns
    -------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[0])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
sstober/deepthought | deepthought/datasets/rwanda2013rhythms/Preprocessor.py | 1 | 7638 | '''
Created on Apr 1, 2014
@author: sstober
'''
import os;
import glob;
import csv;
import math;
import logging;
log = logging.getLogger(__name__);
import numpy as np;
import theano;
from pylearn2.utils.timing import log_timing
from deepthought.util.fs_util import save, load;
from deepthought.datasets.rwanda2013rhythms import LabelConverter
def load_data_file(filename):
#data = np.loadtxt(filename, dtype=float, delimiter=' ', skiprows=1); #, autostrip=True, names=False)
with log_timing(log, 'loading data from {}'.format(filename)):
data = np.genfromtxt(filename, dtype=theano.config.floatX, delimiter=' ', skip_header=1, autostrip=True);
log.info('loaded {}'.format(data.shape));
# print data.shape;
# print data[0];
# print data[-1];
return data;
# with open(filename, 'rb') as csvfile:
# csvreader = csv.reader(csvfile, delimiter=' ', skipinitialspace=True);
# csvreader.next(); # skip header
# for row in csvreader:
# print row;
# break
def load_csv_meta_file(filename):
# open in Universal mode
with open(filename, 'rU') as csvfile:
reader = csv.DictReader(csvfile, dialect='excel', delimiter=',');
onsets = [];
for line in reader:
log.debug(line);
# onsets[line['trial wav name']] = line['onset samples'];
# onsets[line['onset samples']] = line['trial wav name'];
onsets.append([line['onset samples'], line['trial wav name']]);
return onsets;
def load_xlsx_meta_file(filename):
import xlrd;
book = xlrd.open_workbook(filename, encoding_override="cp1252")
sheet = book.sheet_by_index(0);
onsets = [];
for i in range(1, sheet.nrows):
onsets.append([sheet.cell(i,2).value, sheet.cell(i,0).value.encode('ascii')]);
log.debug(onsets[-1]);
return onsets;
def split_trial(path, trial_len):
log.info('processing {}'.format(path));
datafile = glob.glob(os.path.join(path,'*.txt'))[0];
metafile = glob.glob(os.path.join(path,'*_Trials_Onsets.xlsx'))[0];
log.debug('data file: {}'.format(datafile));
log.debug('meta file: {}'.format(metafile));
onsets = load_xlsx_meta_file(metafile);
data = load_data_file(datafile);
log.debug(onsets);
onsets.append([len(data), 'end']); # artificial last marker
trials = {};
for i in xrange(len(onsets) - 1):
onset, label = onsets[i];
next_onset = onsets[i+1][0];
# rounding to integers
onset = int(math.floor(float(onset)));
next_onset = int(math.floor(float(next_onset)));
next_onset = min(onset+trial_len, next_onset);
log.debug('[{}..{}) -> {}'.format(onset, next_onset, label));
trial_data = np.vstack(data[onset:next_onset]);
log.debug('{} samples extracted'.format(trial_data.shape));
trials[label] = trial_data;
filename = os.path.join(path, 'trials.pklz');
with log_timing(log, 'saving to {}'.format(filename)):
save(filename, trials);
return trials;
def generate_cases(subject_id, trials, bad_channels=[]):
'''
3x60
4x60
-> 12 * 60 = 720ms
-> 60ms overlap
'''
label_converter = LabelConverter();
data = [];
labels = [];
channel_meta = [];
# trial_meta = [];
# trial_id = 0;
for stimulus, trial_data in trials.iteritems():
label = label_converter.get_stimulus_id(stimulus);
log.debug('processing {} with {} samples and label {}'.format(stimulus,trial_data.shape,label));
channels = trial_data.transpose();
for i, channel in enumerate(channels):
channel_id = i+1;
if channel_id in bad_channels:
log.debug('skipping bad channel {}'.format(channel_id));
continue;
# convert to float32
channel = np.asfarray(channel, dtype='float32');
data.append(channel);
labels.append(label);
# trial_meta.append([trial_id, stimulus]);
channel_meta.append(i);
# trial_id += 1;
data = np.vstack(data);
labels = np.vstack(labels);
# trial_meta = np.vstack(trial_meta);
channel_meta = np.vstack(channel_meta);
# subject_meta = np.vstack(subject_meta);
log.debug('generated {} data points and {} labels '.format(data.shape, labels.shape));
# return data, labels, trial_meta, channel_meta;
return data, labels, channel_meta;
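# Note: the generated dataset is channel-wise -- every row of `data` holds one
# (good) EEG channel of one trial, labeled with that trial's stimulus id.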
# if __name__ == '__main__':
def preprocess(config):
# config = load_config(default_config='../train_sda.cfg');
DATA_ROOT = config.eeg.get('dataset_root', './');
SAMPLE_RATE = 400; # in Hz
TRIAL_LENGTH = 32; # in sec
TRIAL_LENGTH += 4; # add 4s after end of presentation
TRIAL_SAMPLE_LENGTH = SAMPLE_RATE * TRIAL_LENGTH;
log.info('using dataset at {}'.format(DATA_ROOT));
'''
Note from Dan:
All subjects should have channels 15, 16, 17 and 18 removed [...]
If you want to make them truly identical, you could remove channel 19 from
the subjects with more channels, although this should be 'good' data.
'''
bad_channels = {};
bad_channels[1] = [5, 6, 15, 16, 17, 18, 20, 21];
bad_channels[2] = [ 7, 8, 15, 16, 17, 18, 20, 21];
bad_channels[3] = [5, 6, 15, 16, 17, 18, 20, 21];
bad_channels[4] = [ 7, 8, 15, 16, 17, 18, 20, 21];
bad_channels[5] = [ 7, 8, 15, 16, 17, 18, 20, 21];
bad_channels[6] = [ 7, 8, 9, 12, 15, 16, 17, 18 ];
bad_channels[7] = [5, 6, 12, 15, 16, 17, 18, 20 ];
bad_channels[8] = [ 7, 8, 15, 16, 17, 18, 20, 21];
bad_channels[9] = [5, 6, 12, 15, 16, 17, 18, 20 ];
bad_channels[10] = [5, 6, 15, 16, 17, 18, 20, 21];
bad_channels[11] = [5, 6, 15, 16, 17, 18, 20, 21];
bad_channels[12] = [5, 6, 15, 16, 17, 18, 20, 21];
bad_channels[13] = [5, 6, 12, 15, 16, 17, 18, 20 ];
with log_timing(log, 'generating datasets'):
for subject_id in xrange(1,14):
search_path = os.path.join(DATA_ROOT, 'Sub{0:03d}*'.format(subject_id));
path = glob.glob(search_path);
if path is None or len(path) == 0:
log.warn('nothing found at {}'.format(search_path));
continue;
else:
path = path[0];
trials_filename = os.path.join(path, 'trials.pklz');
trials = None;
if not os.path.isfile(trials_filename):
log.debug('{} not found. running split_trial()'.format(trials_filename));
trials = split_trial(path, TRIAL_SAMPLE_LENGTH);
else:
with log_timing(log, 'loading data from {}'.format(trials_filename)):
trials = load(trials_filename);
assert trials;
dataset_filename = os.path.join(path, 'dataset_13goodchannels_plus4s.pklz');
dataset = generate_cases(subject_id, trials, bad_channels[subject_id]); # = data, labels
with log_timing(log, 'saving dataset to {}'.format(dataset_filename)):
                save(dataset_filename, dataset);
| bsd-3-clause |
uber/pyro | examples/eight_schools/mcmc.py | 1 | 1818 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import argparse
import logging
import data
import torch
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.infer import MCMC, NUTS
logging.basicConfig(format="%(message)s", level=logging.INFO)
pyro.set_rng_seed(0)
def model(sigma):
eta = pyro.sample("eta", dist.Normal(torch.zeros(data.J), torch.ones(data.J)))
mu = pyro.sample("mu", dist.Normal(torch.zeros(1), 10 * torch.ones(1)))
tau = pyro.sample("tau", dist.HalfCauchy(scale=25 * torch.ones(1)))
theta = mu + tau * eta
return pyro.sample("obs", dist.Normal(theta, sigma))
def conditioned_model(model, sigma, y):
return poutine.condition(model, data={"obs": y})(sigma)
def main(args):
nuts_kernel = NUTS(conditioned_model, jit_compile=args.jit)
mcmc = MCMC(
nuts_kernel,
num_samples=args.num_samples,
warmup_steps=args.warmup_steps,
num_chains=args.num_chains,
)
mcmc.run(model, data.sigma, data.y)
mcmc.summary(prob=0.5)
if __name__ == "__main__":
assert pyro.__version__.startswith("1.7.0")
parser = argparse.ArgumentParser(description="Eight Schools MCMC")
parser.add_argument(
"--num-samples",
type=int,
default=1000,
help="number of MCMC samples (default: 1000)",
)
parser.add_argument(
"--num-chains",
type=int,
default=1,
help="number of parallel MCMC chains (default: 1)",
)
parser.add_argument(
"--warmup-steps",
type=int,
default=1000,
help="number of MCMC samples for warmup (default: 1000)",
)
parser.add_argument("--jit", action="store_true", default=False)
args = parser.parse_args()
main(args)
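# Example invocation (flag values are illustrative):
#
#   python mcmc.py --num-samples 2000 --warmup-steps 500 --num-chains 1 --jit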
| apache-2.0 |
LamaHamadeh/Microsoft-DAT210x | Module 5/assignment5.py | 1 | 5381 | '''
author Lama Hamadeh
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from sklearn import preprocessing
from sklearn.decomposition import PCA
matplotlib.style.use('ggplot') # Look Pretty
#------------------------------------
def plotDecisionBoundary(model, X, y):
fig = plt.figure()
ax = fig.add_subplot(111)
padding = 0.6
resolution = 0.0025
colors = ['royalblue','forestgreen','ghostwhite']
# Calculate the boundaries
x_min, x_max = X[:, 0].min(), X[:, 0].max()
y_min, y_max = X[:, 1].min(), X[:, 1].max()
x_range = x_max - x_min
y_range = y_max - y_min
x_min -= x_range * padding
y_min -= y_range * padding
x_max += x_range * padding
y_max += y_range * padding
  # Create a 2D Grid Matrix. The values stored in the matrix are the predictions of the class at said location
xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution), np.arange(y_min, y_max, resolution))
# What class does the classifier say?
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour map
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.terrain)
# Plot the test original points as well...
for label in range(len(np.unique(y))):
indices = np.where(y == label)
plt.scatter(X[indices, 0], X[indices, 1], c=colors[label], label=str(label), alpha=0.8)
p = model.get_params()
plt.axis('tight')
plt.title('K = ' + str(p['n_neighbors']))
#------------------------------------
#
# TODO: Load up the dataset into a variable called X. Check the .head and
# compare it to the file you loaded in a text editor. Make sure you're
# loading your data properly--don't fail on the 1st step!
#
# .. your code here ..
X = pd.read_csv('/Users/Admin/Desktop/DAT210x/DAT210x-master/Module5/Datasets/wheat.data')
#print X.head()
#------------------------------------
#
# TODO: Copy the 'wheat_type' series slice out of X, and into a series
# called 'y'. Then drop the original 'wheat_type' and the 'id' columns from the X
#
# .. your code here ..
y=X['wheat_type'].copy()
X.drop(labels = ['id', 'wheat_type'], inplace = True, axis = 1)
#------------------------------------
# TODO: Do a quick, "ordinal" conversion of 'y'. In actuality our
# classification isn't ordinal, but just as an experiment...
#
# .. your code here ..
y = y.astype('category').cat.codes
#------------------------------------
#
# TODO: Basic nan munging. Fill each row's nans with the mean of the feature
#
# .. your code here ..
#check which column have nans values in order o apply .fillna
def num_missing(x):
return sum(x.isnull())
#Applying per column:
print "Missing values per column:"
print X.apply(num_missing, axis=0) #axis=0 defines that function is to be applied on each column
#As the result indicates that 'compactness', 'width' and 'groove' have nan values, where
#nans of compactness = 3
#nans of width = 1
#nans of groove = 4
#then we can apply the .fillna on them.
X.compactness.fillna(X.compactness.mean(), inplace = True)
X.width.fillna(X.width.mean(), inplace = True)
X.groove.fillna(X.groove.mean(), inplace = True)
#------------------------------------
#
# TODO: Just like your preprocessing transformation, create a PCA
# transformation as well. Fit it against your training data, and then
# project your training and testing features into PCA space using the
# PCA model's .transform() method.
#
# NOTE: This has to be done because the only way to visualize the decision
# boundary in 2D would be if your KNN algo ran in 2D as well:
#
# .. your code here ..
T = preprocessing.normalize(X)
pca = PCA(n_components = 2)
pca_X = pca.fit_transform(T)
#------------------------------------
#
# TODO: Split X into training and testing data sets using train_test_split().
# INFO: Use 0.33 test size, and use random_state=1. This is important
# so that your answers are verifiable. In the real world, you wouldn't
# specify a random_state.
#
# .. your code here ..
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(pca_X, y, test_size = 0.33, random_state = 1)
#We use X_train and y_train for training (fit), and use X_test and y_test for predicting or scoring.
##------------------------------------
#
# TODO: Create and train a KNeighborsClassifier. Start with K=9 neighbors.
# NOTE: Be sure train your classifier against the pre-processed, PCA-
# transformed training data above! You do not, of course, need to transform
# your labels.
#
# .. your code here ..
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors = 9)
knn.fit(X_train, y_train)
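# Optional (illustrative) sweep over K to see how accuracy shifts with the
# number of neighbors:
#
#   for k in [1, 3, 5, 7, 9]:
#       clf = KNeighborsClassifier(n_neighbors=k).fit(X_train, y_train)
#       print k, clf.score(X_test, y_test)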
# HINT: Ensure your KNeighbors classifier object from earlier is called 'knn'
plotDecisionBoundary(knn, X_train, y_train)
#------------------------------------
#
# TODO: Display the accuracy score of your test data/labels, computed by
# your KNeighbors model.
#
# NOTE: You do NOT have to run .predict before calling .score, since
# .score will take care of running your predictions for you automatically.
#
# .. your code here ..
print knn.score(X_test, y_test)
#------------------------------------
#
# BONUS: Instead of the ordinal conversion, try and get this assignment
# working with a proper Pandas get_dummies for feature encoding. HINT:
# You might have to update some of the plotDecisionBoundary code.
plt.show()
| mit |
jlcarmic/producthunt_simulator | venv/lib/python2.7/site-packages/scipy/stats/mstats_basic.py | 30 | 84684 | """
An extension of scipy.stats.stats to support masked arrays
"""
# Original author (2007): Pierre GF Gerard-Marchant
# TODO : f_value_wilks_lambda looks botched... what are dfnum & dfden for ?
# TODO : ttest_rel looks botched: what are x1,x2,v1,v2 for ?
# TODO : reimplement ksonesamp
from __future__ import division, print_function, absolute_import
__all__ = ['argstoarray',
'betai',
'count_tied_groups',
'describe',
'f_oneway','f_value_wilks_lambda','find_repeats','friedmanchisquare',
'kendalltau','kendalltau_seasonal','kruskal','kruskalwallis',
'ks_twosamp','ks_2samp','kurtosis','kurtosistest',
'linregress',
'mannwhitneyu', 'meppf','mode','moment','mquantiles','msign',
'normaltest',
'obrientransform',
'pearsonr','plotting_positions','pointbiserialr',
'rankdata',
'scoreatpercentile','sem',
'sen_seasonal_slopes','signaltonoise','skew','skewtest','spearmanr',
'theilslopes','threshold','tmax','tmean','tmin','trim','trimboth',
'trimtail','trima','trimr','trimmed_mean','trimmed_std',
'trimmed_stde','trimmed_var','tsem','ttest_1samp','ttest_onesamp',
'ttest_ind','ttest_rel','tvar',
'variation',
'winsorize',
]
import numpy as np
from numpy import ndarray
import numpy.ma as ma
from numpy.ma import masked, nomask
from scipy._lib.six import iteritems
import itertools
import warnings
from collections import namedtuple
from . import distributions
import scipy.special as special
from ._stats_mstats_common import (
_find_repeats,
linregress as stats_linregress,
theilslopes as stats_theilslopes
)
genmissingvaldoc = """
Notes
-----
Missing values are considered pair-wise: if a value is missing in x,
the corresponding value in y is masked.
"""
def _chk_asarray(a, axis):
# Always returns a masked array, raveled for axis=None
a = ma.asanyarray(a)
if axis is None:
a = ma.ravel(a)
outaxis = 0
else:
outaxis = axis
return a, outaxis
def _chk2_asarray(a, b, axis):
a = ma.asanyarray(a)
b = ma.asanyarray(b)
if axis is None:
a = ma.ravel(a)
b = ma.ravel(b)
outaxis = 0
else:
outaxis = axis
return a, b, outaxis
def _chk_size(a,b):
a = ma.asanyarray(a)
b = ma.asanyarray(b)
(na, nb) = (a.size, b.size)
if na != nb:
raise ValueError("The size of the input array should match!"
" (%s <> %s)" % (na, nb))
return (a, b, na)
def argstoarray(*args):
"""
Constructs a 2D array from a group of sequences.
Sequences are filled with missing values to match the length of the longest
sequence.
Parameters
----------
args : sequences
Group of sequences.
Returns
-------
argstoarray : MaskedArray
A ( `m` x `n` ) masked array, where `m` is the number of arguments and
`n` the length of the longest argument.
Notes
-----
`numpy.ma.row_stack` has identical behavior, but is called with a sequence
of sequences.
"""
if len(args) == 1 and not isinstance(args[0], ndarray):
output = ma.asarray(args[0])
if output.ndim != 2:
raise ValueError("The input should be 2D")
else:
n = len(args)
m = max([len(k) for k in args])
output = ma.array(np.empty((n,m), dtype=float), mask=True)
for (k,v) in enumerate(args):
output[k,:len(v)] = v
output[np.logical_not(np.isfinite(output._data))] = masked
return output
def find_repeats(arr):
"""Find repeats in arr and return a tuple (repeats, repeat_count).
The input is cast to float64. Masked values are discarded.
Parameters
----------
arr : sequence
Input array. The array is flattened if it is not 1D.
Returns
-------
repeats : ndarray
Array of repeated values.
counts : ndarray
Array of counts.
"""
# Make sure we get a copy. ma.compressed promises a "new array", but can
# actually return a reference.
compr = np.asarray(ma.compressed(arr), dtype=np.float64)
if compr is arr or compr.base is arr:
compr = compr.copy()
return _find_repeats(compr)
def count_tied_groups(x, use_missing=False):
"""
Counts the number of tied values.
Parameters
----------
x : sequence
Sequence of data on which to counts the ties
use_missing : bool, optional
Whether to consider missing values as tied.
Returns
-------
count_tied_groups : dict
        Returns a dictionary mapping the size of each group of tied values
        to the number of such groups.
Examples
--------
>>> from scipy.stats import mstats
>>> z = [0, 0, 0, 2, 2, 2, 3, 3, 4, 5, 6]
>>> mstats.count_tied_groups(z)
{2: 1, 3: 2}
In the above example, the ties were 0 (3x), 2 (3x) and 3 (2x).
>>> z = np.ma.array([0, 0, 1, 2, 2, 2, 3, 3, 4, 5, 6])
>>> mstats.count_tied_groups(z)
{2: 2, 3: 1}
>>> z[[1,-1]] = np.ma.masked
>>> mstats.count_tied_groups(z, use_missing=True)
{2: 2, 3: 1}
"""
nmasked = ma.getmask(x).sum()
# We need the copy as find_repeats will overwrite the initial data
data = ma.compressed(x).copy()
(ties, counts) = find_repeats(data)
nties = {}
if len(ties):
nties = dict(zip(np.unique(counts), itertools.repeat(1)))
nties.update(dict(zip(*find_repeats(counts))))
if nmasked and use_missing:
try:
nties[nmasked] += 1
except KeyError:
nties[nmasked] = 1
return nties
def rankdata(data, axis=None, use_missing=False):
"""Returns the rank (also known as order statistics) of each data point
along the given axis.
If some values are tied, their rank is averaged.
If some values are masked, their rank is set to 0 if use_missing is False,
or set to the average rank of the unmasked values if use_missing is True.
Parameters
----------
data : sequence
Input data. The data is transformed to a masked array
axis : {None,int}, optional
Axis along which to perform the ranking.
If None, the array is first flattened. An exception is raised if
the axis is specified for arrays with a dimension larger than 2
use_missing : bool, optional
Whether the masked values have a rank of 0 (False) or equal to the
average rank of the unmasked values (True).
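    Examples
    --------
    Tied values share the average of the ranks they would otherwise occupy:
    >>> from scipy.stats import mstats
    >>> mstats.rankdata([10, 20, 20, 30])
    array([ 1. ,  2.5,  2.5,  4. ])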
"""
def _rank1d(data, use_missing=False):
n = data.count()
rk = np.empty(data.size, dtype=float)
idx = data.argsort()
rk[idx[:n]] = np.arange(1,n+1)
if use_missing:
rk[idx[n:]] = (n+1)/2.
else:
rk[idx[n:]] = 0
repeats = find_repeats(data.copy())
for r in repeats[0]:
condition = (data == r).filled(False)
rk[condition] = rk[condition].mean()
return rk
data = ma.array(data, copy=False)
if axis is None:
if data.ndim > 1:
return _rank1d(data.ravel(), use_missing).reshape(data.shape)
else:
return _rank1d(data, use_missing)
else:
return ma.apply_along_axis(_rank1d,axis,data,use_missing).view(ndarray)
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
def mode(a, axis=0):
"""
Returns an array of the modal (most common) value in the passed array.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Notes
-----
For more details, see `stats.mode`.
"""
a, axis = _chk_asarray(a, axis)
def _mode1D(a):
(rep,cnt) = find_repeats(a)
if not cnt.ndim:
return (0, 0)
elif cnt.size:
return (rep[cnt.argmax()], cnt.max())
else:
not_masked_indices = ma.flatnotmasked_edges(a)
first_not_masked_index = not_masked_indices[0]
return (a[first_not_masked_index], 1)
if axis is None:
output = _mode1D(ma.ravel(a))
output = (ma.array(output[0]), ma.array(output[1]))
else:
output = ma.apply_along_axis(_mode1D, axis, a)
newshape = list(a.shape)
newshape[axis] = 1
slices = [slice(None)] * output.ndim
slices[axis] = 0
modes = output[tuple(slices)].reshape(newshape)
slices[axis] = 1
counts = output[tuple(slices)].reshape(newshape)
output = (modes, counts)
return ModeResult(*output)
@np.deprecate(message="mstats.betai is deprecated in scipy 0.17.0; "
"use special.betainc instead.")
def betai(a, b, x):
"""
betai() is deprecated in scipy 0.17.0.
For details about this function, see `stats.betai`.
"""
return _betai(a, b, x)
def _betai(a, b, x):
x = np.asanyarray(x)
x = ma.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
def msign(x):
"""Returns the sign of x, or 0 if x is masked."""
return ma.filled(np.sign(x), 0)
def pearsonr(x,y):
"""
Calculates a Pearson correlation coefficient and the p-value for testing
non-correlation.
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed. Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear
relationship. Positive correlations imply that as `x` increases, so does
`y`. Negative correlations imply that as `x` increases, `y` decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : 1-D array_like
Input
y : 1-D array_like
Input
Returns
-------
pearsonr : float
Pearson's correlation coefficient, 2-tailed p-value.
References
----------
http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
"""
(x, y, n) = _chk_size(x, y)
(x, y) = (x.ravel(), y.ravel())
# Get the common mask and the total nb of unmasked elements
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
n -= m.sum()
df = n-2
if df < 0:
return (masked, masked)
(mx, my) = (x.mean(), y.mean())
(xm, ym) = (x-mx, y-my)
r_num = ma.add.reduce(xm*ym)
r_den = ma.sqrt(ma.dot(xm,xm) * ma.dot(ym,ym))
r = r_num / r_den
# Presumably, if r > 1, then it is only some small artifact of floating
# point arithmetic.
r = min(r, 1.0)
r = max(r, -1.0)
if r is masked or abs(r) == 1.0:
prob = 0.
else:
t_squared = (df / ((1.0 - r) * (1.0 + r))) * r * r
prob = _betai(0.5*df, 0.5, df/(df + t_squared))
return r, prob
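# Usage sketch (illustrative addition, not in the original source): pairs
# with a masked value in either input are excluded before computing the
# coefficient and the two-tailed p-value.
# >>> import numpy as np
# >>> from scipy.stats.mstats import pearsonr
# >>> x = np.ma.array([1., 2., 3., 4., 5.], mask=[0, 0, 0, 0, 1])
# >>> y = np.ma.array([1.1, 2.1, 2.9, 4.2, 9.9], mask=[0, 0, 0, 0, 1])
# >>> r, p = pearsonr(x, y)   # the masked fifth pair does not influence r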
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
def spearmanr(x, y, use_ties=True):
"""
Calculates a Spearman rank-order correlation coefficient and the p-value
to test for non-correlation.
    The Spearman correlation is a nonparametric measure of the monotonic
    relationship between two datasets. Unlike the Pearson correlation, the
    Spearman correlation does not assume that both datasets are normally
    distributed. Like other correlation coefficients, this one varies
    between -1 and +1 with 0 implying no correlation. Correlations of -1 or
    +1 imply an exact monotonic relationship. Positive correlations imply
    that as `x` increases, so does `y`. Negative correlations imply that as
    `x` increases, `y` decreases.
Missing values are discarded pair-wise: if a value is missing in `x`, the
corresponding value in `y` is masked.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : array_like
The length of `x` must be > 2.
y : array_like
The length of `y` must be > 2.
use_ties : bool, optional
Whether the correction for ties should be computed.
Returns
-------
correlation : float
Spearman correlation coefficient
pvalue : float
2-tailed p-value.
References
----------
[CRCProbStat2000] section 14.7
"""
(x, y, n) = _chk_size(x, y)
(x, y) = (x.ravel(), y.ravel())
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
n -= m.sum()
if m is not nomask:
x = ma.array(x, mask=m, copy=True)
y = ma.array(y, mask=m, copy=True)
df = n-2
if df < 0:
raise ValueError("The input must have at least 3 entries!")
# Gets the ranks and rank differences
rankx = rankdata(x)
ranky = rankdata(y)
dsq = np.add.reduce((rankx-ranky)**2)
# Tie correction
if use_ties:
xties = count_tied_groups(x)
yties = count_tied_groups(y)
        corr_x = np.sum([v*k*(k**2-1) for (k,v) in iteritems(xties)])/12.
        corr_y = np.sum([v*k*(k**2-1) for (k,v) in iteritems(yties)])/12.
else:
corr_x = corr_y = 0
denom = n*(n**2 - 1)/6.
if corr_x != 0 or corr_y != 0:
rho = denom - dsq - corr_x - corr_y
rho /= ma.sqrt((denom-2*corr_x)*(denom-2*corr_y))
else:
rho = 1. - dsq/denom
t = ma.sqrt(ma.divide(df,(rho+1.0)*(1.0-rho))) * rho
if t is masked:
prob = 0.
else:
prob = _betai(0.5*df, 0.5, df/(df + t * t))
return SpearmanrResult(rho, prob)
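# Usage sketch (illustrative addition, not in the original source):
# spearmanr needs at least 3 unmasked pairs and applies the tie correction
# by default (use_ties=True).
# >>> from scipy.stats.mstats import spearmanr
# >>> rho, p = spearmanr([1, 2, 3, 4, 5], [5, 6, 7, 8, 7])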
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
def kendalltau(x, y, use_ties=True, use_missing=False):
"""
Computes Kendall's rank correlation tau on two variables *x* and *y*.
Parameters
----------
x : sequence
First data list (for example, time).
y : sequence
Second data list.
use_ties : {True, False}, optional
Whether ties correction should be performed.
use_missing : {False, True}, optional
Whether missing data should be allocated a rank of 0 (False) or the
average rank (True)
Returns
-------
correlation : float
Kendall tau
pvalue : float
        Approximate 2-sided p-value.
"""
(x, y, n) = _chk_size(x, y)
(x, y) = (x.flatten(), y.flatten())
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
if m is not nomask:
x = ma.array(x, mask=m, copy=True)
y = ma.array(y, mask=m, copy=True)
n -= m.sum()
if n < 2:
return KendalltauResult(np.nan, np.nan)
rx = ma.masked_equal(rankdata(x, use_missing=use_missing), 0)
ry = ma.masked_equal(rankdata(y, use_missing=use_missing), 0)
idx = rx.argsort()
(rx, ry) = (rx[idx], ry[idx])
C = np.sum([((ry[i+1:] > ry[i]) * (rx[i+1:] > rx[i])).filled(0).sum()
for i in range(len(ry)-1)], dtype=float)
D = np.sum([((ry[i+1:] < ry[i])*(rx[i+1:] > rx[i])).filled(0).sum()
for i in range(len(ry)-1)], dtype=float)
if use_ties:
xties = count_tied_groups(x)
yties = count_tied_groups(y)
corr_x = np.sum([v*k*(k-1) for (k,v) in iteritems(xties)], dtype=float)
corr_y = np.sum([v*k*(k-1) for (k,v) in iteritems(yties)], dtype=float)
denom = ma.sqrt((n*(n-1)-corr_x)/2. * (n*(n-1)-corr_y)/2.)
else:
denom = n*(n-1)/2.
tau = (C-D) / denom
var_s = n*(n-1)*(2*n+5)
if use_ties:
        var_s -= np.sum([v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(xties)])
        var_s -= np.sum([v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(yties)])
v1 = np.sum([v*k*(k-1) for (k, v) in iteritems(xties)], dtype=float) *\
np.sum([v*k*(k-1) for (k, v) in iteritems(yties)], dtype=float)
v1 /= 2.*n*(n-1)
if n > 2:
v2 = np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(xties)],
dtype=float) * \
np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(yties)],
dtype=float)
v2 /= 9.*n*(n-1)*(n-2)
else:
v2 = 0
else:
v1 = v2 = 0
var_s /= 18.
var_s += (v1 + v2)
z = (C-D)/np.sqrt(var_s)
prob = special.erfc(abs(z)/np.sqrt(2))
return KendalltauResult(tau, prob)
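# Usage sketch (illustrative addition, not in the original source): tau with
# the default tie correction; masked pairs are dropped first.
# >>> from scipy.stats.mstats import kendalltau
# >>> tau, p = kendalltau([12, 2, 1, 12, 2], [1, 4, 7, 1, 0])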
def kendalltau_seasonal(x):
"""
Computes a multivariate Kendall's rank correlation tau, for seasonal data.
Parameters
----------
x : 2-D ndarray
Array of seasonal data, with seasons in columns.
"""
x = ma.array(x, subok=True, copy=False, ndmin=2)
(n,m) = x.shape
n_p = x.count(0)
    S_szn = np.sum([msign(x[i:]-x[i]).sum(0) for i in range(n)], axis=0)
S_tot = S_szn.sum()
n_tot = x.count()
ties = count_tied_groups(x.compressed())
    corr_ties = np.sum([v*k*(k-1) for (k,v) in iteritems(ties)])
denom_tot = ma.sqrt(1.*n_tot*(n_tot-1)*(n_tot*(n_tot-1)-corr_ties))/2.
R = rankdata(x, axis=0, use_missing=True)
K = ma.empty((m,m), dtype=int)
covmat = ma.empty((m,m), dtype=float)
denom_szn = ma.empty(m, dtype=float)
for j in range(m):
ties_j = count_tied_groups(x[:,j].compressed())
        corr_j = np.sum([v*k*(k-1) for (k,v) in iteritems(ties_j)])
cmb = n_p[j]*(n_p[j]-1)
for k in range(j,m,1):
            K[j,k] = np.sum([msign((x[i:,j]-x[i,j])*(x[i:,k]-x[i,k])).sum()
                             for i in range(n)])
covmat[j,k] = (K[j,k] + 4*(R[:,j]*R[:,k]).sum() -
n*(n_p[j]+1)*(n_p[k]+1))/3.
K[k,j] = K[j,k]
covmat[k,j] = covmat[j,k]
denom_szn[j] = ma.sqrt(cmb*(cmb-corr_j)) / 2.
var_szn = covmat.diagonal()
z_szn = msign(S_szn) * (abs(S_szn)-1) / ma.sqrt(var_szn)
z_tot_ind = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(var_szn.sum())
z_tot_dep = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(covmat.sum())
prob_szn = special.erfc(abs(z_szn)/np.sqrt(2))
prob_tot_ind = special.erfc(abs(z_tot_ind)/np.sqrt(2))
prob_tot_dep = special.erfc(abs(z_tot_dep)/np.sqrt(2))
chi2_tot = (z_szn*z_szn).sum()
chi2_trd = m * z_szn.mean()**2
output = {'seasonal tau': S_szn/denom_szn,
'global tau': S_tot/denom_tot,
'global tau (alt)': S_tot/denom_szn.sum(),
'seasonal p-value': prob_szn,
'global p-value (indep)': prob_tot_ind,
'global p-value (dep)': prob_tot_dep,
'chi2 total': chi2_tot,
'chi2 trend': chi2_trd,
}
return output
PointbiserialrResult = namedtuple('PointbiserialrResult', ('correlation',
'pvalue'))
def pointbiserialr(x, y):
"""Calculates a point biserial correlation coefficient and its p-value.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value
pvalue : float
2-tailed p-value
Notes
-----
Missing values are considered pair-wise: if a value is missing in x,
the corresponding value in y is masked.
For more details on `pointbiserialr`, see `stats.pointbiserialr`.
"""
x = ma.fix_invalid(x, copy=True).astype(bool)
y = ma.fix_invalid(y, copy=True).astype(float)
# Get rid of the missing data
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
if m is not nomask:
unmask = np.logical_not(m)
x = x[unmask]
y = y[unmask]
n = len(x)
# phat is the fraction of x values that are True
phat = x.sum() / float(n)
y0 = y[~x] # y-values where x is False
y1 = y[x] # y-values where x is True
y0m = y0.mean()
y1m = y1.mean()
rpb = (y1m - y0m)*np.sqrt(phat * (1-phat)) / y.std()
df = n-2
t = rpb*ma.sqrt(df/(1.0-rpb**2))
prob = _betai(0.5*df, 0.5, df/(df+t*t))
return PointbiserialrResult(rpb, prob)
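# Usage sketch (illustrative addition, not in the original source): `x` is a
# dichotomous variable (coerced to bool) and `y` is continuous.
# >>> from scipy.stats.mstats import pointbiserialr
# >>> x = [0, 0, 0, 1, 1, 1, 1]
# >>> y = [1., 0., 1., 2., 3., 4., 5.]
# >>> r, p = pointbiserialr(x, y)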
LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept',
'rvalue', 'pvalue',
'stderr'))
def linregress(x, y=None):
"""
Linear regression calculation
Note that the non-masked version is used, and that this docstring is
replaced by the non-masked docstring + some info on missing data.
"""
if y is None:
x = ma.array(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
msg = ("If only `x` is given as input, it has to be of shape "
"(2, N) or (N, 2), provided shape was %s" % str(x.shape))
raise ValueError(msg)
else:
x = ma.array(x)
y = ma.array(y)
x = x.flatten()
y = y.flatten()
m = ma.mask_or(ma.getmask(x), ma.getmask(y), shrink=False)
if m is not nomask:
x = ma.array(x, mask=m)
y = ma.array(y, mask=m)
if np.any(~m):
slope, intercept, r, prob, sterrest = stats_linregress(x.data[~m],
y.data[~m])
else:
# All data is masked
return None, None, None, None, None
else:
slope, intercept, r, prob, sterrest = stats_linregress(x.data, y.data)
return LinregressResult(slope, intercept, r, prob, sterrest)
if stats_linregress.__doc__:
linregress.__doc__ = stats_linregress.__doc__ + genmissingvaldoc
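# Usage sketch (illustrative addition, not in the original source): a single
# (2, N) argument is unpacked into x and y; masked pairs are dropped before
# delegating to the non-masked linregress.
# >>> import numpy as np
# >>> from scipy.stats.mstats import linregress
# >>> xy = np.array([[1., 2., 3., 4.], [2., 3., 5., 7.]])
# >>> res = linregress(xy)  # fields: slope, intercept, rvalue, pvalue, stderr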
def theilslopes(y, x=None, alpha=0.95):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
Returns
-------
medslope : float
Theil slope.
medintercept : float
Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
lo_slope : float
Lower bound of the confidence interval on `medslope`.
up_slope : float
Upper bound of the confidence interval on `medslope`.
Notes
-----
For more details on `theilslopes`, see `stats.theilslopes`.
"""
y = ma.asarray(y).flatten()
if x is None:
x = ma.arange(len(y), dtype=float)
else:
x = ma.asarray(x).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y),len(x)))
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
y._mask = x._mask = m
# Disregard any masked elements of x or y
y = y.compressed()
x = x.compressed().astype(float)
# We now have unmasked arrays so can use `stats.theilslopes`
return stats_theilslopes(y, x, alpha=alpha)
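# Usage sketch (illustrative addition, not in the original source): the
# Theil-Sen slope and a 90% confidence interval on it; with x omitted,
# arange(len(y)) is used as the independent variable.
# >>> from scipy.stats.mstats import theilslopes
# >>> y = [0., 1.1, 1.9, 3.2, 3.9]
# >>> medslope, medint, lo, up = theilslopes(y, alpha=0.90)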
def sen_seasonal_slopes(x):
x = ma.array(x, subok=True, copy=False, ndmin=2)
(n,_) = x.shape
# Get list of slopes per season
szn_slopes = ma.vstack([(x[i+1:]-x[i])/np.arange(1,n-i)[:,None]
for i in range(n)])
szn_medslopes = ma.median(szn_slopes, axis=0)
medslope = ma.median(szn_slopes, axis=None)
return szn_medslopes, medslope
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
def ttest_1samp(a, popmean, axis=0):
"""
Calculates the T-test for the mean of ONE group of scores.
Parameters
----------
a : array_like
sample observation
popmean : float or array_like
        expected value in null hypothesis; if array_like, it must have the
        same shape as `a`, excluding the axis dimension
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
array `a`.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Notes
-----
For more details on `ttest_1samp`, see `stats.ttest_1samp`.
"""
a, axis = _chk_asarray(a, axis)
    if a.size == 0:
        return Ttest_1sampResult(np.nan, np.nan)
x = a.mean(axis=axis)
v = a.var(axis=axis, ddof=1)
n = a.count(axis=axis)
# force df to be an array for masked division not to throw a warning
df = ma.asanyarray(n - 1.0)
svar = ((n - 1.0) * v) / df
with np.errstate(divide='ignore', invalid='ignore'):
t = (x - popmean) / ma.sqrt(svar / n)
prob = special.betainc(0.5*df, 0.5, df/(df + t*t))
return Ttest_1sampResult(t, prob)
ttest_onesamp = ttest_1samp
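# Usage sketch (illustrative addition, not in the original source): two-sided
# one-sample t-test of the null hypothesis that the population mean is 3;
# the masked observation is excluded from the count.
# >>> import numpy as np
# >>> from scipy.stats.mstats import ttest_1samp
# >>> a = np.ma.array([2.8, 3.1, 3.4, 2.9, 3.3, 9.], mask=[0, 0, 0, 0, 0, 1])
# >>> t, p = ttest_1samp(a, popmean=3.0)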
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind(a, b, axis=0, equal_var=True):
"""
Calculates the T-test for the means of TWO INDEPENDENT samples of scores.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True, perform a standard independent 2 sample test that assumes equal
population variances.
If False, perform Welch's t-test, which does not assume equal population
variance.
.. versionadded:: 0.17.0
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The two-tailed p-value.
Notes
-----
For more details on `ttest_ind`, see `stats.ttest_ind`.
"""
a, b, axis = _chk2_asarray(a, b, axis)
if a.size == 0 or b.size == 0:
return Ttest_indResult(np.nan, np.nan)
(x1, x2) = (a.mean(axis), b.mean(axis))
(v1, v2) = (a.var(axis=axis, ddof=1), b.var(axis=axis, ddof=1))
(n1, n2) = (a.count(axis), b.count(axis))
if equal_var:
# force df to be an array for masked division not to throw a warning
df = ma.asanyarray(n1 + n2 - 2.0)
svar = ((n1-1)*v1+(n2-1)*v2) / df
denom = ma.sqrt(svar*(1.0/n1 + 1.0/n2)) # n-D computation here!
else:
vn1 = v1/n1
vn2 = v2/n2
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero.
# It doesn't matter what df is as long as it is not NaN.
df = np.where(np.isnan(df), 1, df)
denom = ma.sqrt(vn1 + vn2)
with np.errstate(divide='ignore', invalid='ignore'):
t = (x1-x2) / denom
probs = special.betainc(0.5*df, 0.5, df/(df + t*t)).reshape(t.shape)
return Ttest_indResult(t, probs.squeeze())
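# Usage sketch (illustrative addition, not in the original source): pass
# equal_var=False for the Welch variant when the group variances may differ.
# >>> from scipy.stats.mstats import ttest_ind
# >>> g1 = [19.8, 20.4, 19.6, 17.8, 18.5, 18.9]
# >>> g2 = [28.2, 26.6, 20.1, 23.3, 25.2, 22.1]
# >>> t, p = ttest_ind(g1, g2, equal_var=False)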
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
def ttest_rel(a, b, axis=0):
"""
Calculates the T-test on TWO RELATED samples of scores, a and b.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Notes
-----
For more details on `ttest_rel`, see `stats.ttest_rel`.
"""
a, b, axis = _chk2_asarray(a, b, axis)
if len(a) != len(b):
raise ValueError('unequal length arrays')
if a.size == 0 or b.size == 0:
return Ttest_relResult(np.nan, np.nan)
n = a.count(axis)
df = ma.asanyarray(n-1.0)
d = (a-b).astype('d')
dm = d.mean(axis)
v = d.var(axis=axis, ddof=1)
denom = ma.sqrt(v / n)
with np.errstate(divide='ignore', invalid='ignore'):
t = dm / denom
probs = special.betainc(0.5*df, 0.5, df/(df + t*t)).reshape(t.shape).squeeze()
return Ttest_relResult(t, probs)
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic',
'pvalue'))
def mannwhitneyu(x,y, use_continuity=True):
"""
Computes the Mann-Whitney statistic
Missing values in `x` and/or `y` are discarded.
Parameters
----------
x : sequence
Input
y : sequence
Input
use_continuity : {True, False}, optional
Whether a continuity correction (1/2.) should be taken into account.
Returns
-------
statistic : float
        The Mann-Whitney statistic.
pvalue : float
Approximate p-value assuming a normal distribution.
"""
x = ma.asarray(x).compressed().view(ndarray)
y = ma.asarray(y).compressed().view(ndarray)
ranks = rankdata(np.concatenate([x,y]))
(nx, ny) = (len(x), len(y))
nt = nx + ny
U = ranks[:nx].sum() - nx*(nx+1)/2.
U = max(U, nx*ny - U)
u = nx*ny - U
mu = (nx*ny)/2.
sigsq = (nt**3 - nt)/12.
ties = count_tied_groups(ranks)
    sigsq -= np.sum([v*(k**3-k) for (k,v) in iteritems(ties)])/12.
sigsq *= nx*ny/float(nt*(nt-1))
if use_continuity:
z = (U - 1/2. - mu) / ma.sqrt(sigsq)
else:
z = (U - mu) / ma.sqrt(sigsq)
prob = special.erfc(abs(z)/np.sqrt(2))
return MannwhitneyuResult(u, prob)
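# Usage sketch (illustrative addition, not in the original source): the
# returned statistic is the smaller of the two U values; missing values in
# either input are discarded before ranking.
# >>> from scipy.stats.mstats import mannwhitneyu
# >>> u, p = mannwhitneyu([1, 4, 2, 5, 3], [10, 8, 7, 9, 12])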
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
def kruskal(*args):
"""
Compute the Kruskal-Wallis H-test for independent samples
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments.
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution
Notes
-----
For more details on `kruskal`, see `stats.kruskal`.
"""
output = argstoarray(*args)
ranks = ma.masked_equal(rankdata(output, use_missing=False), 0)
sumrk = ranks.sum(-1)
ngrp = ranks.count(-1)
ntot = ranks.count()
H = 12./(ntot*(ntot+1)) * (sumrk**2/ngrp).sum() - 3*(ntot+1)
# Tie correction
ties = count_tied_groups(ranks)
    T = 1. - np.sum([v*(k**3-k) for (k,v) in iteritems(ties)])/float(ntot**3-ntot)
if T == 0:
raise ValueError('All numbers are identical in kruskal')
H /= T
df = len(output) - 1
prob = distributions.chi2.sf(H, df)
return KruskalResult(H, prob)
kruskalwallis = kruskal
def ks_twosamp(data1, data2, alternative="two-sided"):
"""
Computes the Kolmogorov-Smirnov test on two samples.
Missing values are discarded.
Parameters
----------
data1 : array_like
First data set
data2 : array_like
Second data set
alternative : {'two-sided', 'less', 'greater'}, optional
Indicates the alternative hypothesis. Default is 'two-sided'.
Returns
-------
d : float
        Value of the Kolmogorov-Smirnov statistic.
p : float
Corresponding p-value.
"""
(data1, data2) = (ma.asarray(data1), ma.asarray(data2))
(n1, n2) = (data1.count(), data2.count())
n = (n1*n2/float(n1+n2))
mix = ma.concatenate((data1.compressed(), data2.compressed()))
mixsort = mix.argsort(kind='mergesort')
csum = np.where(mixsort < n1, 1./n1, -1./n2).cumsum()
# Check for ties
if len(np.unique(mix)) < (n1+n2):
csum = csum[np.r_[np.diff(mix[mixsort]).nonzero()[0],-1]]
alternative = str(alternative).lower()[0]
if alternative == 't':
d = ma.abs(csum).max()
prob = special.kolmogorov(np.sqrt(n)*d)
elif alternative == 'l':
d = -csum.min()
prob = np.exp(-2*n*d**2)
elif alternative == 'g':
d = csum.max()
prob = np.exp(-2*n*d**2)
else:
raise ValueError("Invalid value for the alternative hypothesis: "
"should be in 'two-sided', 'less' or 'greater'")
return (d, prob)
ks_2samp = ks_twosamp
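# Usage sketch (illustrative addition, not in the original source): the
# two-sided two-sample KS test on masked inputs; missing values are dropped
# before the empirical CDFs are compared.
# >>> from scipy.stats.mstats import ks_twosamp
# >>> d, p = ks_twosamp([1., 2., 3., 4.], [2.5, 3.5, 4.5, 5.5])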
@np.deprecate(message="mstats.threshold is deprecated in scipy 0.17.0")
def threshold(a, threshmin=None, threshmax=None, newval=0):
"""
Clip array to a given value.
Similar to numpy.clip(), except that values less than `threshmin` or
greater than `threshmax` are replaced by `newval`, instead of by
`threshmin` and `threshmax` respectively.
Parameters
----------
a : ndarray
Input data
threshmin : {None, float}, optional
Lower threshold. If None, set to the minimum value.
threshmax : {None, float}, optional
Upper threshold. If None, set to the maximum value.
newval : {0, float}, optional
Value outside the thresholds.
Returns
-------
threshold : ndarray
        Returns `a`, with values less than `threshmin` and values greater
        than `threshmax` replaced with `newval`.
"""
a = ma.array(a, copy=True)
mask = np.zeros(a.shape, dtype=bool)
if threshmin is not None:
mask |= (a < threshmin).filled(False)
if threshmax is not None:
mask |= (a > threshmax).filled(False)
a[mask] = newval
return a
def trima(a, limits=None, inclusive=(True,True)):
"""
Trims an array by masking the data outside some given limits.
Returns a masked version of the input array.
Parameters
----------
a : array_like
Input array.
limits : {None, tuple}, optional
Tuple of (lower limit, upper limit) in absolute values.
Values of the input array lower (greater) than the lower (upper) limit
        will be masked. A limit set to None indicates an open interval.
inclusive : (bool, bool) tuple, optional
Tuple of (lower flag, upper flag), indicating whether values exactly
equal to the lower (upper) limit are allowed.
"""
a = ma.asarray(a)
a.unshare_mask()
if (limits is None) or (limits == (None, None)):
return a
(lower_lim, upper_lim) = limits
(lower_in, upper_in) = inclusive
condition = False
if lower_lim is not None:
if lower_in:
condition |= (a < lower_lim)
else:
condition |= (a <= lower_lim)
if upper_lim is not None:
if upper_in:
condition |= (a > upper_lim)
else:
condition |= (a >= upper_lim)
a[condition.filled(True)] = masked
return a
def trimr(a, limits=None, inclusive=(True, True), axis=None):
"""
Trims an array by masking some proportion of the data on each end.
Returns a masked version of the input array.
Parameters
----------
a : sequence
Input array.
limits : {None, tuple}, optional
Tuple of the percentages to cut on each side of the array, with respect
to the number of unmasked data, as floats between 0. and 1.
        If n denotes the number of unmasked data before trimming, the
        (n*limits[0])th smallest data and the (n*limits[1])th largest data are
        masked, and the total number of unmasked data after trimming is
        n*(1.-sum(limits)). The value of one limit can be set to None to
        indicate an open interval.
inclusive : {(True,True) tuple}, optional
Tuple of flags indicating whether the number of data being masked on
the left (right) end should be truncated (True) or rounded (False) to
integers.
axis : {None,int}, optional
Axis along which to trim. If None, the whole array is trimmed, but its
shape is maintained.
"""
def _trimr1D(a, low_limit, up_limit, low_inclusive, up_inclusive):
n = a.count()
idx = a.argsort()
if low_limit:
if low_inclusive:
lowidx = int(low_limit*n)
else:
                lowidx = int(np.round(low_limit*n))
a[idx[:lowidx]] = masked
if up_limit is not None:
if up_inclusive:
upidx = n - int(n*up_limit)
else:
                upidx = n - int(np.round(n*up_limit))
a[idx[upidx:]] = masked
return a
a = ma.asarray(a)
a.unshare_mask()
if limits is None:
return a
# Check the limits
(lolim, uplim) = limits
errmsg = "The proportion to cut from the %s should be between 0. and 1."
if lolim is not None:
if lolim > 1. or lolim < 0:
            raise ValueError(errmsg % 'beginning' + " (got %s)" % lolim)
if uplim is not None:
if uplim > 1. or uplim < 0:
            raise ValueError(errmsg % 'end' + " (got %s)" % uplim)
(loinc, upinc) = inclusive
if axis is None:
shp = a.shape
return _trimr1D(a.ravel(),lolim,uplim,loinc,upinc).reshape(shp)
else:
return ma.apply_along_axis(_trimr1D, axis, a, lolim,uplim,loinc,upinc)
trimdoc = """
Parameters
----------
a : sequence
Input array
limits : {None, tuple}, optional
If `relative` is False, tuple (lower limit, upper limit) in absolute values.
Values of the input array lower (greater) than the lower (upper) limit are
masked.
If `relative` is True, tuple (lower percentage, upper percentage) to cut
on each side of the array, with respect to the number of unmasked data.
        If n denotes the number of unmasked data before trimming, the
        (n*limits[0])th smallest data and the (n*limits[1])th largest data are
        masked, and the total number of unmasked data after trimming is
        n*(1.-sum(limits)).
        In each case, the value of one limit can be set to None to indicate an
        open interval.
        If limits is None, no trimming is performed.
    inclusive : {(bool, bool) tuple}, optional
        If `relative` is False, tuple indicating whether values exactly equal
        to the absolute limits are allowed.
        If `relative` is True, tuple indicating whether the number of data
        being masked on each side should be truncated (True) or rounded
        (False).
relative : bool, optional
Whether to consider the limits as absolute values (False) or proportions
to cut (True).
axis : int, optional
Axis along which to trim.
"""
def trim(a, limits=None, inclusive=(True,True), relative=False, axis=None):
"""
Trims an array by masking the data outside some given limits.
Returns a masked version of the input array.
%s
Examples
--------
>>> from scipy.stats.mstats import trim
>>> z = [ 1, 2, 3, 4, 5, 6, 7, 8, 9,10]
>>> print(trim(z,(3,8)))
[-- -- 3 4 5 6 7 8 -- --]
>>> print(trim(z,(0.1,0.2),relative=True))
[-- 2 3 4 5 6 7 8 -- --]
"""
if relative:
return trimr(a, limits=limits, inclusive=inclusive, axis=axis)
else:
return trima(a, limits=limits, inclusive=inclusive)
if trim.__doc__ is not None:
trim.__doc__ = trim.__doc__ % trimdoc
def trimboth(data, proportiontocut=0.2, inclusive=(True,True), axis=None):
"""
Trims the smallest and largest data values.
Trims the `data` by masking the ``int(proportiontocut * n)`` smallest and
``int(proportiontocut * n)`` largest values of data along the given axis,
where n is the number of unmasked values before trimming.
Parameters
----------
data : ndarray
Data to trim.
proportiontocut : float, optional
Percentage of trimming (as a float between 0 and 1).
If n is the number of unmasked values before trimming, the number of
values after trimming is ``(1 - 2*proportiontocut) * n``.
Default is 0.2.
inclusive : {(bool, bool) tuple}, optional
        Tuple indicating whether the number of data being masked on each side
        should be truncated (True) or rounded (False).
axis : int, optional
Axis along which to perform the trimming.
If None, the input array is first flattened.
"""
return trimr(data, limits=(proportiontocut,proportiontocut),
inclusive=inclusive, axis=axis)
def trimtail(data, proportiontocut=0.2, tail='left', inclusive=(True,True),
axis=None):
"""
Trims the data by masking values from one tail.
Parameters
----------
data : array_like
Data to trim.
proportiontocut : float, optional
Percentage of trimming. If n is the number of unmasked values
before trimming, the number of values after trimming is
``(1 - proportiontocut) * n``. Default is 0.2.
tail : {'left','right'}, optional
If 'left' the `proportiontocut` lowest values will be masked.
If 'right' the `proportiontocut` highest values will be masked.
Default is 'left'.
inclusive : {(bool, bool) tuple}, optional
        Tuple indicating whether the number of data being masked on each side
        should be truncated (True) or rounded (False). Default is
        (True, True).
axis : int, optional
Axis along which to perform the trimming.
If None, the input array is first flattened. Default is None.
Returns
-------
trimtail : ndarray
Returned array of same shape as `data` with masked tail values.
"""
tail = str(tail).lower()[0]
if tail == 'l':
limits = (proportiontocut,None)
elif tail == 'r':
limits = (None, proportiontocut)
else:
raise TypeError("The tail argument should be in ('left','right')")
return trimr(data, limits=limits, axis=axis, inclusive=inclusive)
trim1 = trimtail
def trimmed_mean(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
axis=None):
"""Returns the trimmed mean of the data along the given axis.
%s
""" % trimdoc
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
if relative:
return trimr(a,limits=limits,inclusive=inclusive,axis=axis).mean(axis=axis)
else:
return trima(a,limits=limits,inclusive=inclusive).mean(axis=axis)
def trimmed_var(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
axis=None, ddof=0):
"""Returns the trimmed variance of the data along the given axis.
%s
    ddof : {0,integer}, optional
        Delta Degrees of Freedom. The denominator used during computations
        is (n - ddof). ddof=0 corresponds to a biased estimate, ddof=1 to
        an unbiased estimate of the variance.
""" % trimdoc
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
if relative:
out = trimr(a,limits=limits, inclusive=inclusive,axis=axis)
else:
out = trima(a,limits=limits,inclusive=inclusive)
return out.var(axis=axis, ddof=ddof)
def trimmed_std(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
axis=None, ddof=0):
"""Returns the trimmed standard deviation of the data along the given axis.
%s
    ddof : {0,integer}, optional
        Delta Degrees of Freedom. The denominator used during computations
        is (n - ddof). ddof=0 corresponds to a biased estimate, ddof=1 to
        an unbiased estimate of the variance.
""" % trimdoc
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
if relative:
out = trimr(a,limits=limits,inclusive=inclusive,axis=axis)
else:
out = trima(a,limits=limits,inclusive=inclusive)
return out.std(axis=axis,ddof=ddof)
def trimmed_stde(a, limits=(0.1,0.1), inclusive=(1,1), axis=None):
"""
Returns the standard error of the trimmed mean along the given axis.
Parameters
----------
a : sequence
Input array
limits : {(0.1,0.1), tuple of float}, optional
tuple (lower percentage, upper percentage) to cut on each side of the
array, with respect to the number of unmasked data.
If n is the number of unmasked data before trimming, the values
smaller than ``n * limits[0]`` and the values larger than
        ``n * limits[1]`` are masked, and the total number of unmasked
data after trimming is ``n * (1.-sum(limits))``. In each case,
the value of one limit can be set to None to indicate an open interval.
If `limits` is None, no trimming is performed.
    inclusive : {(bool, bool) tuple}, optional
        Tuple indicating whether the number of data being masked on each side
        should be truncated (True) or rounded (False).
axis : int, optional
Axis along which to trim.
Returns
-------
trimmed_stde : scalar or ndarray
"""
    def _trimmed_stde_1D(a, low_limit, up_limit, low_inclusive, up_inclusive):
        "Returns the standard error of the trimmed mean for a 1D input data."
        # Treat a missing limit as "do not trim on that side"
        low_limit = low_limit or 0.
        up_limit = up_limit or 0.
        n = a.count()
        idx = a.argsort()
        # Default to empty trims so the winsorizing step below is a no-op
        (lowidx, upidx) = (0, n)
        if low_limit:
            if low_inclusive:
                lowidx = int(low_limit*n)
            else:
                lowidx = int(np.round(low_limit*n))
            a[idx[:lowidx]] = masked
        if up_limit:
            if up_inclusive:
                upidx = n - int(n*up_limit)
            else:
                upidx = n - int(np.round(n*up_limit))
            a[idx[upidx:]] = masked
        # Winsorize the trimmed tails before computing the standard deviation
        a[idx[:lowidx]] = a[idx[lowidx]]
        a[idx[upidx:]] = a[idx[upidx-1]]
        winstd = a.std(ddof=1)
        return winstd / ((1-low_limit-up_limit)*np.sqrt(len(a)))
a = ma.array(a, copy=True, subok=True)
a.unshare_mask()
if limits is None:
return a.std(axis=axis,ddof=1)/ma.sqrt(a.count(axis))
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
# Check the limits
(lolim, uplim) = limits
errmsg = "The proportion to cut from the %s should be between 0. and 1."
if lolim is not None:
if lolim > 1. or lolim < 0:
            raise ValueError(errmsg % 'beginning' + " (got %s)" % lolim)
if uplim is not None:
if uplim > 1. or uplim < 0:
            raise ValueError(errmsg % 'end' + " (got %s)" % uplim)
(loinc, upinc) = inclusive
if (axis is None):
return _trimmed_stde_1D(a.ravel(),lolim,uplim,loinc,upinc)
else:
if a.ndim > 2:
raise ValueError("Array 'a' must be at most two dimensional, but got a.ndim = %d" % a.ndim)
return ma.apply_along_axis(_trimmed_stde_1D, axis, a,
lolim,uplim,loinc,upinc)
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""
Compute the trimmed mean.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. If None, compute over the
whole array. Default is None.
Returns
-------
tmean : float
Notes
-----
For more details on `tmean`, see `stats.tmean`.
"""
return trima(a, limits=limits, inclusive=inclusive).mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed variance
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. If None, compute over the
whole array. Default is zero.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
For more details on `tvar`, see `stats.tvar`.
"""
    a = ma.asarray(a).astype(float).ravel()
    if limits is None:
        n = a.count()  # number of unmasked values
        return np.ma.var(a) * n/(n-1.)
am = _mask_to_limits(a, limits=limits, inclusive=inclusive)
return np.ma.var(am, axis=axis, ddof=ddof)
def tmin(a, lowerlimit=None, axis=0, inclusive=True):
"""
Compute the trimmed minimum
Parameters
----------
a : array_like
array of values
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
Returns
-------
tmin : float, int or ndarray
Notes
-----
For more details on `tmin`, see `stats.tmin`.
"""
a, axis = _chk_asarray(a, axis)
am = trima(a, (lowerlimit, None), (inclusive, False))
return ma.minimum.reduce(am, axis)
def tmax(a, upperlimit=None, axis=0, inclusive=True):
"""
Compute the trimmed maximum
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
array of values
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
Returns
-------
tmax : float, int or ndarray
Notes
-----
For more details on `tmax`, see `stats.tmax`.
"""
a, axis = _chk_asarray(a, axis)
am = trima(a, (None, upperlimit), (False, inclusive))
return ma.maximum.reduce(am, axis)
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. If None, compute over the
whole array. Default is zero.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Notes
-----
For more details on `tsem`, see `stats.tsem`.
"""
a = ma.asarray(a).ravel()
if limits is None:
n = float(a.count())
return a.std(axis=axis, ddof=ddof)/ma.sqrt(n)
am = trima(a.ravel(), limits, inclusive)
sd = np.sqrt(am.var(axis=axis, ddof=ddof))
return sd / np.sqrt(am.count())
def winsorize(a, limits=None, inclusive=(True, True), inplace=False,
axis=None):
"""Returns a Winsorized version of the input array.
    The `limits[0]` fraction of the lowest values is set to the `limits[0]`-th
    percentile, and the `limits[1]` fraction of the highest values is set to
    the (1 - `limits[1]`)-th percentile.
Masked values are skipped.
Parameters
----------
a : sequence
Input array.
limits : {None, tuple of float}, optional
        Tuple of the percentages to cut on each side of the array, with respect
        to the number of unmasked data, as floats between 0. and 1.
        If n denotes the number of unmasked data before winsorizing, the
        (n*limits[0])th smallest data and the (n*limits[1])th largest data are
        replaced by the value at the cut point on that side instead of being
        masked. The value of one limit can be set to None to indicate an open
        interval.
    inclusive : {(True, True) tuple}, optional
        Tuple indicating whether the number of data being winsorized on each
        side should be truncated (True) or rounded (False).
inplace : {False, True}, optional
Whether to winsorize in place (True) or to use a copy (False)
axis : {None, int}, optional
Axis along which to trim. If None, the whole array is trimmed, but its
shape is maintained.
Notes
-----
This function is applied to reduce the effect of possibly spurious outliers
by limiting the extreme values.
"""
def _winsorize1D(a, low_limit, up_limit, low_include, up_include):
n = a.count()
idx = a.argsort()
if low_limit:
if low_include:
lowidx = int(low_limit * n)
else:
                lowidx = int(np.round(low_limit * n))
a[idx[:lowidx]] = a[idx[lowidx]]
if up_limit is not None:
if up_include:
upidx = n - int(n * up_limit)
else:
                upidx = n - int(np.round(n * up_limit))
a[idx[upidx:]] = a[idx[upidx - 1]]
return a
# We are going to modify a: better make a copy
a = ma.array(a, copy=np.logical_not(inplace))
if limits is None:
return a
if (not isinstance(limits, tuple)) and isinstance(limits, float):
limits = (limits, limits)
# Check the limits
(lolim, uplim) = limits
errmsg = "The proportion to cut from the %s should be between 0. and 1."
if lolim is not None:
if lolim > 1. or lolim < 0:
            raise ValueError(errmsg % 'beginning' + " (got %s)" % lolim)
if uplim is not None:
if uplim > 1. or uplim < 0:
            raise ValueError(errmsg % 'end' + " (got %s)" % uplim)
(loinc, upinc) = inclusive
if axis is None:
shp = a.shape
return _winsorize1D(a.ravel(), lolim, uplim, loinc, upinc).reshape(shp)
else:
return ma.apply_along_axis(_winsorize1D, axis, a, lolim, uplim, loinc,
upinc)
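# Usage sketch (illustrative addition, not in the original source): with
# limits=(0.1, 0.2) on 10 values, the lowest value is raised to the next
# remaining value and the two highest are lowered; nothing is masked out.
# >>> import numpy as np
# >>> from scipy.stats.mstats import winsorize
# >>> z = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=float)
# >>> w = winsorize(z, limits=(0.1, 0.2))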
def moment(a, moment=1, axis=0):
"""
Calculates the nth moment about the mean for a sample.
Parameters
----------
a : array_like
data
moment : int, optional
order of central moment that is returned
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations, no degrees of freedom correction is done.
Notes
-----
For more details about `moment`, see `stats.moment`.
"""
a, axis = _chk_asarray(a, axis)
if moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
            if current_n % 2:
                current_n = (current_n - 1) // 2
            else:
                current_n //= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
a_zero_mean = a - ma.expand_dims(a.mean(axis), axis)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return s.mean(axis)
def variation(a, axis=0):
"""
Computes the coefficient of variation, the ratio of the biased standard
deviation to the mean.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
Notes
-----
For more details about `variation`, see `stats.variation`.
"""
a, axis = _chk_asarray(a, axis)
return a.std(axis)/a.mean(axis)
def skew(a, axis=0, bias=True):
"""
Computes the skewness of a data set.
Parameters
----------
a : ndarray
data
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
Notes
-----
For more details about `skew`, see `stats.skew`.
"""
a, axis = _chk_asarray(a,axis)
n = a.count(axis)
m2 = moment(a, 2, axis)
m3 = moment(a, 3, axis)
olderr = np.seterr(all='ignore')
try:
vals = ma.where(m2 == 0, 0, m3 / m2**1.5)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = ma.sqrt((n-1.0)*n)/(n-2.0)*m3/m2**1.5
np.place(vals, can_correct, nval)
return vals
def kurtosis(a, axis=0, fisher=True, bias=True):
"""
Computes the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
eliminate bias coming from biased moment estimators
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
data for which the kurtosis is calculated
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
Notes
-----
For more details about `kurtosis`, see `stats.kurtosis`.
"""
a, axis = _chk_asarray(a, axis)
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
olderr = np.seterr(all='ignore')
try:
vals = ma.where(m2 == 0, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
n = a.count(axis)
can_correct = (n > 3) & (m2 is not ma.masked and m2 > 0)
if can_correct.any():
n = np.extract(can_correct, n)
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3)*((n*n-1.0)*m4/m2**2.0-3*(n-1)**2.0)
np.place(vals, can_correct, nval+3.0)
if fisher:
return vals - 3
else:
return vals
DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean',
'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=0, bias=True):
"""
Computes several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Data array
axis : int or None, optional
Axis along which to calculate statistics. Default 0. If None,
compute over the whole array `a`.
ddof : int, optional
degree of freedom (default 0); note that default ddof is different
from the same routine in stats.describe
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected for
statistical bias.
Returns
-------
nobs : int
        size of the data (discarding missing values)
minmax : (int, int)
min, max
mean : float
arithmetic mean
variance : float
unbiased variance
skewness : float
biased skewness
kurtosis : float
biased kurtosis
Examples
--------
>>> from scipy.stats.mstats import describe
>>> ma = np.ma.array(range(6), mask=[0, 0, 0, 1, 1, 1])
>>> describe(ma)
DescribeResult(nobs=array(3), minmax=(masked_array(data = 0,
mask = False,
fill_value = 999999)
, masked_array(data = 2,
mask = False,
fill_value = 999999)
), mean=1.0, variance=0.66666666666666663, skewness=masked_array(data = 0.0,
mask = False,
fill_value = 1e+20)
, kurtosis=-1.5)
"""
a, axis = _chk_asarray(a, axis)
n = a.count(axis)
    mm = (ma.minimum.reduce(a, axis=axis), ma.maximum.reduce(a, axis=axis))
m = a.mean(axis)
v = a.var(axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
def stde_median(data, axis=None):
"""Returns the McKean-Schrader estimate of the standard error of the sample
median along the given axis. masked values are discarded.
Parameters
----------
data : ndarray
Data to trim.
axis : {None,int}, optional
Axis along which to perform the trimming.
If None, the input array is first flattened.
"""
def _stdemed_1D(data):
data = np.sort(data.compressed())
n = len(data)
z = 2.5758293035489004
k = int(np.round((n+1)/2. - z * np.sqrt(n/4.),0))
return ((data[n-k] - data[k-1])/(2.*z))
data = ma.array(data, copy=False, subok=True)
if (axis is None):
return _stdemed_1D(data)
else:
if data.ndim > 2:
raise ValueError("Array 'data' must be at most two dimensional, "
"but got data.ndim = %d" % data.ndim)
return ma.apply_along_axis(_stdemed_1D, axis, data)
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0):
"""
Tests whether the skew is different from the normal distribution.
Parameters
----------
a : array
The data to be tested
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
a 2-sided p-value for the hypothesis test
Notes
-----
For more details about `skewtest`, see `stats.skewtest`.
"""
a, axis = _chk_asarray(a, axis)
if axis is None:
a = a.ravel()
axis = 0
b2 = skew(a,axis)
n = a.count(axis)
if np.min(n) < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % np.min(n))
y = b2 * ma.sqrt(((n+1)*(n+3)) / (6.0*(n-2)))
beta2 = (3.0*(n*n+27*n-70)*(n+1)*(n+3)) / ((n-2.0)*(n+5)*(n+7)*(n+9))
W2 = -1 + ma.sqrt(2*(beta2-1))
delta = 1/ma.sqrt(0.5*ma.log(W2))
alpha = ma.sqrt(2.0/(W2-1))
y = ma.where(y == 0, 1, y)
Z = delta*ma.log(y/alpha + ma.sqrt((y/alpha)**2+1))
return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic',
'pvalue'))
def kurtosistest(a, axis=0):
"""
Tests whether a dataset has normal kurtosis
Parameters
----------
a : array
array of the sample data
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The 2-sided p-value for the hypothesis test
Notes
-----
For more details about `kurtosistest`, see `stats.kurtosistest`.
"""
a, axis = _chk_asarray(a, axis)
n = a.count(axis=axis)
if np.min(n) < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % np.min(n))
if np.min(n) < 20:
warnings.warn(
"kurtosistest only valid for n>=20 ... continuing anyway, n=%i" %
np.min(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2.)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))
x = (b2-E)/ma.sqrt(varb2)
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2./(9.0*A)
denom = 1 + x*ma.sqrt(2/(A-4.0))
if np.ma.isMaskedArray(denom):
# For multi-dimensional array input
denom[denom < 0] = masked
elif denom < 0:
denom = masked
term2 = ma.power((1-2.0/A)/denom,1/3.0)
Z = (term1 - term2) / np.sqrt(2/(9.0*A))
return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0):
"""
Tests whether a sample differs from a normal distribution.
Parameters
----------
a : array_like
The array containing the data to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
Notes
-----
For more details about `normaltest`, see `stats.normaltest`.
"""
a, axis = _chk_asarray(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
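# Usage sketch (illustrative addition, not in the original source):
# normaltest combines skewtest and kurtosistest, so it needs at least 8
# observations (and kurtosistest warns below 20).
# >>> import numpy as np
# >>> from scipy.stats.mstats import normaltest
# >>> rng = np.random.RandomState(12345)
# >>> k2, p = normaltest(rng.normal(size=100))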
def mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None,
limit=()):
"""
Computes empirical quantiles for a data array.
    Sample quantiles are defined by ``Q(p) = (1-gamma)*x[j] + gamma*x[j+1]``,
where ``x[j]`` is the j-th order statistic, and gamma is a function of
``j = floor(n*p + m)``, ``m = alphap + p*(1 - alphap - betap)`` and
``g = n*p + m - j``.
    Reinterpreting the above equations to compare to **R** leads to the
    equation: ``p(k) = (k - alphap)/(n + 1 - alphap - betap)``
Typical values of (alphap,betap) are:
- (0,1) : ``p(k) = k/n`` : linear interpolation of cdf
(**R** type 4)
- (.5,.5) : ``p(k) = (k - 1/2.)/n`` : piecewise linear function
(**R** type 5)
- (0,0) : ``p(k) = k/(n+1)`` :
(**R** type 6)
- (1,1) : ``p(k) = (k-1)/(n-1)``: p(k) = mode[F(x[k])].
(**R** type 7, **R** default)
- (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``: Then p(k) ~ median[F(x[k])].
The resulting quantile estimates are approximately median-unbiased
regardless of the distribution of x.
(**R** type 8)
- (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``: Blom.
The resulting quantile estimates are approximately unbiased
if x is normally distributed
(**R** type 9)
- (.4,.4) : approximately quantile unbiased (Cunnane)
- (.35,.35): APL, used with PWM
Parameters
----------
a : array_like
Input data, as a sequence or array of dimension at most 2.
prob : array_like, optional
List of quantiles to compute.
alphap : float, optional
Plotting positions parameter, default is 0.4.
betap : float, optional
Plotting positions parameter, default is 0.4.
axis : int, optional
Axis along which to perform the trimming.
If None (default), the input array is first flattened.
limit : tuple, optional
Tuple of (lower, upper) values.
Values of `a` outside this open interval are ignored.
Returns
-------
mquantiles : MaskedArray
An array containing the calculated quantiles.
Notes
-----
This formulation is very similar to **R** except the calculation of
``m`` from ``alphap`` and ``betap``, where in **R** ``m`` is defined
with each type.
References
----------
.. [1] *R* statistical software: http://www.r-project.org/
.. [2] *R* ``quantile`` function:
http://stat.ethz.ch/R-manual/R-devel/library/stats/html/quantile.html
Examples
--------
>>> from scipy.stats.mstats import mquantiles
>>> a = np.array([6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.])
>>> mquantiles(a)
array([ 19.2, 40. , 42.8])
Using a 2D array, specifying axis and limit.
>>> data = np.array([[ 6., 7., 1.],
... [ 47., 15., 2.],
... [ 49., 36., 3.],
... [ 15., 39., 4.],
... [ 42., 40., -999.],
... [ 41., 41., -999.],
... [ 7., -999., -999.],
... [ 39., -999., -999.],
... [ 43., -999., -999.],
... [ 40., -999., -999.],
... [ 36., -999., -999.]])
>>> print(mquantiles(data, axis=0, limit=(0, 50)))
[[ 19.2 14.6 1.45]
[ 40. 37.5 2.5 ]
[ 42.8 40.05 3.55]]
>>> data[:, 2] = -999.
>>> print(mquantiles(data, axis=0, limit=(0, 50)))
[[19.200000000000003 14.6 --]
[40.0 37.5 --]
[42.800000000000004 40.05 --]]
"""
def _quantiles1D(data,m,p):
x = np.sort(data.compressed())
n = len(x)
if n == 0:
return ma.array(np.empty(len(p), dtype=float), mask=True)
elif n == 1:
return ma.array(np.resize(x, p.shape), mask=nomask)
aleph = (n*p + m)
k = np.floor(aleph.clip(1, n-1)).astype(int)
gamma = (aleph-k).clip(0,1)
return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()]
data = ma.array(a, copy=False)
if data.ndim > 2:
raise TypeError("Array should be 2D at most !")
if limit:
condition = (limit[0] < data) & (data < limit[1])
data[~condition.filled(True)] = masked
p = np.array(prob, copy=False, ndmin=1)
m = alphap + p*(1.-alphap-betap)
# Computes quantiles along axis (or globally)
if (axis is None):
return _quantiles1D(data, m, p)
return ma.apply_along_axis(_quantiles1D, axis, data, m, p)
def scoreatpercentile(data, per, limit=(), alphap=.4, betap=.4):
"""Calculate the score at the given 'per' percentile of the
sequence a. For example, the score at per=50 is the median.
This function is a shortcut to mquantile
"""
if (per < 0) or (per > 100.):
raise ValueError("The percentile should be between 0. and 100. !"
" (got %s)" % per)
return mquantiles(data, prob=[per/100.], alphap=alphap, betap=betap,
limit=limit, axis=0).squeeze()
def plotting_positions(data, alpha=0.4, beta=0.4):
"""
Returns plotting positions (or empirical percentile points) for the data.
Plotting positions are defined as ``(i-alpha)/(n+1-alpha-beta)``, where:
- i is the rank order statistics
- n is the number of unmasked values along the given axis
- `alpha` and `beta` are two parameters.
Typical values for `alpha` and `beta` are:
- (0,1) : ``p(k) = k/n``, linear interpolation of cdf (R, type 4)
- (.5,.5) : ``p(k) = (k-1/2.)/n``, piecewise linear function
(R, type 5)
- (0,0) : ``p(k) = k/(n+1)``, Weibull (R type 6)
- (1,1) : ``p(k) = (k-1)/(n-1)``, in this case,
``p(k) = mode[F(x[k])]``. That's R default (R type 7)
- (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``, then
``p(k) ~ median[F(x[k])]``.
The resulting quantile estimates are approximately median-unbiased
regardless of the distribution of x. (R type 8)
- (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``, Blom.
The resulting quantile estimates are approximately unbiased
if x is normally distributed (R type 9)
- (.4,.4) : approximately quantile unbiased (Cunnane)
- (.35,.35): APL, used with PWM
- (.3175, .3175): used in scipy.stats.probplot
Parameters
----------
data : array_like
Input data, as a sequence or array of dimension at most 2.
alpha : float, optional
Plotting positions parameter. Default is 0.4.
beta : float, optional
Plotting positions parameter. Default is 0.4.
Returns
-------
positions : MaskedArray
The calculated plotting positions.
"""
data = ma.array(data, copy=False).reshape(1,-1)
n = data.count()
plpos = np.empty(data.size, dtype=float)
plpos[n:] = 0
plpos[data.argsort()[:n]] = ((np.arange(1, n+1) - alpha) /
(n + 1.0 - alpha - beta))
return ma.array(plpos, mask=data._mask)
meppf = plotting_positions
def obrientransform(*args):
"""
Computes a transform on input data (any number of columns). Used to
test for homogeneity of variance prior to running one-way stats. Each
    array in ``*args`` is one level of a factor. If an `f_oneway()` run on
    the transformed data is found significant, the variances are unequal.
    From Maxwell and Delaney, p.112.
Returns: transformed data for use in an ANOVA
"""
data = argstoarray(*args).T
v = data.var(axis=0,ddof=1)
m = data.mean(0)
n = data.count(0).astype(float)
# result = ((N-1.5)*N*(a-m)**2 - 0.5*v*(n-1))/((n-1)*(n-2))
data -= m
data **= 2
data *= (n-1.5)*n
data -= 0.5*v*(n-1)
data /= (n-1.)*(n-2.)
if not ma.allclose(v,data.mean(0)):
raise ValueError("Lack of convergence in obrientransform.")
return data
@np.deprecate(message="mstats.signaltonoise is deprecated in scipy 0.16.0")
def signaltonoise(data, axis=0):
"""Calculates the signal-to-noise ratio, as the ratio of the mean over
standard deviation along the given axis.
Parameters
----------
data : sequence
Input data
axis : {0, int}, optional
Axis along which to compute. If None, the computation is performed
on a flat version of the array.
"""
data = ma.array(data, copy=False)
m = data.mean(axis)
sd = data.std(axis, ddof=0)
return m/sd
def sem(a, axis=0, ddof=1):
"""
Calculates the standard error of the mean of the input array.
Also sometimes called standard error of measurement.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
If axis is None, ravel `a` first. If axis is an integer, this will be
the axis over which to operate. Defaults to 0.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
The default value for `ddof` changed in scipy 0.15.0 to be consistent with
`stats.sem` as well as with the most common definition used (like in the R
documentation).
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> print(stats.mstats.sem(a))
[2.8284271247461903 2.8284271247461903 2.8284271247461903
2.8284271247461903]
Find standard error across the whole array, using n degrees of freedom:
>>> print(stats.mstats.sem(a, axis=None, ddof=0))
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
n = a.count(axis=axis)
s = a.std(axis=axis, ddof=ddof) / ma.sqrt(n)
return s
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
def f_oneway(*args):
"""
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups. From Heiman, pp.394-7.
Usage: ``f_oneway(*args)``, where ``*args`` is 2 or more arrays,
one per treatment group.
Returns
-------
statistic : float
The computed F-value of the test.
pvalue : float
The associated p-value from the F-distribution.
"""
# Construct a single array of arguments: each row is a group
data = argstoarray(*args)
ngroups = len(data)
ntot = data.count()
sstot = (data**2).sum() - (data.sum())**2/float(ntot)
ssbg = (data.count(-1) * (data.mean(-1)-data.mean())**2).sum()
sswg = sstot-ssbg
dfbg = ngroups-1
dfwg = ntot - ngroups
msb = ssbg/float(dfbg)
msw = sswg/float(dfwg)
f = msb/msw
prob = special.fdtrc(dfbg, dfwg, f) # equivalent to stats.f.sf
return F_onewayResult(f, prob)
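# Editor's note: illustrative sketch, not part of the original module; three
# small treatment groups are passed as one sequence per group.
def _demo_f_oneway():
    a = [8.9, 8.8, 9.1, 8.9]
    b = [9.5, 9.7, 9.6, 9.4]
    c = [9.2, 9.1, 9.3, 9.0]
    result = f_oneway(a, b, c)
    print(result.statistic, result.pvalue)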
@np.deprecate(message="mstats.f_value_wilks_lambda deprecated in scipy 0.17.0")
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
"""Calculation of Wilks lambda F-statistic for multivariate data, per
Maxwell & Delaney p.657.
"""
ER = ma.array(ER, copy=False, ndmin=2)
EF = ma.array(EF, copy=False, ndmin=2)
if ma.getmask(ER).any() or ma.getmask(EF).any():
raise NotImplementedError("Not implemented when the inputs "
"have missing data")
lmbda = np.linalg.det(EF) / np.linalg.det(ER)
q = ma.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))
q = ma.filled(q, 1)
n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)
return n_um / d_en
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
def friedmanchisquare(*args):
"""Friedman Chi-Square is a non-parametric, one-way within-subjects ANOVA.
This function calculates the Friedman Chi-square test for repeated measures
and returns the result, along with the associated probability value.
    Each input is considered a given group. Ideally, the number of treatments
    in each group should be equal. If this is not the case, only the first
n treatments are taken into account, where n is the number of treatments
of the smallest group.
If a group has some missing values, the corresponding treatments are masked
in the other groups.
The test statistic is corrected for ties.
Masked values in one group are propagated to the other groups.
Returns
-------
statistic : float
the test statistic.
pvalue : float
the associated p-value.
"""
data = argstoarray(*args).astype(float)
k = len(data)
if k < 3:
raise ValueError("Less than 3 groups (%i): " % k +
"the Friedman test is NOT appropriate.")
ranked = ma.masked_values(rankdata(data, axis=0), 0)
if ranked._mask is not nomask:
ranked = ma.mask_cols(ranked)
ranked = ranked.compressed().reshape(k,-1).view(ndarray)
else:
ranked = ranked._data
(k,n) = ranked.shape
# Ties correction
repeats = np.array([find_repeats(_) for _ in ranked.T], dtype=object)
ties = repeats[repeats.nonzero()].reshape(-1,2)[:,-1].astype(int)
tie_correction = 1 - (ties**3-ties).sum()/float(n*(k**3-k))
ssbg = np.sum((ranked.sum(-1) - n*(k+1)/2.)**2)
chisq = ssbg * 12./(n*k*(k+1)) * 1./tie_correction
return FriedmanchisquareResult(chisq,
distributions.chi2.sf(chisq, k-1))
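# Editor's note: illustrative sketch, not part of the original module; three
# repeated measures taken on the same six subjects.
def _demo_friedmanchisquare():
    g1 = [7.0, 9.9, 8.5, 5.1, 10.3, 8.6]
    g2 = [5.3, 5.7, 4.7, 3.5, 7.7, 6.1]
    g3 = [4.9, 7.6, 5.5, 2.8, 8.4, 6.4]
    print(friedmanchisquare(g1, g2, g3))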
| mit |
jhseu/tensorflow | tensorflow/python/tpu/datasets_test.py | 24 | 7572 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU datasets tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.lib.io import python_io
from tensorflow.python.platform import test
from tensorflow.python.tpu import datasets
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
_NUM_FILES = 10
_NUM_ENTRIES = 20
class DatasetsTest(test.TestCase):
def setUp(self):
super(DatasetsTest, self).setUp()
self._coord = server_lib.Server.create_local_server()
self._worker = server_lib.Server.create_local_server()
self._cluster_def = cluster_pb2.ClusterDef()
worker_job = self._cluster_def.job.add()
worker_job.name = 'worker'
worker_job.tasks[0] = self._worker.target[len('grpc://'):]
coord_job = self._cluster_def.job.add()
coord_job.name = 'coordinator'
coord_job.tasks[0] = self._coord.target[len('grpc://'):]
session_config = config_pb2.ConfigProto(cluster_def=self._cluster_def)
self._sess = session.Session(self._worker.target, config=session_config)
self._worker_device = '/job:' + worker_job.name
def testTextLineDataset(self):
all_contents = []
for i in range(_NUM_FILES):
filename = os.path.join(self.get_temp_dir(), 'text_line.%d.txt' % i)
contents = []
for j in range(_NUM_ENTRIES):
contents.append(compat.as_bytes('%d: %d' % (i, j)))
with open(filename, 'wb') as f:
f.write(b'\n'.join(contents))
all_contents.extend(contents)
dataset = datasets.StreamingFilesDataset(
os.path.join(self.get_temp_dir(), 'text_line.*.txt'), filetype='text')
with ops.device(self._worker_device):
iterator = dataset_ops.make_initializable_iterator(dataset)
self._sess.run(iterator.initializer)
get_next = iterator.get_next()
retrieved_values = []
for _ in range(4 * len(all_contents)):
retrieved_values.append(compat.as_bytes(self._sess.run(get_next)))
self.assertEqual(set(all_contents), set(retrieved_values))
def testTFRecordDataset(self):
all_contents = []
for i in range(_NUM_FILES):
filename = os.path.join(self.get_temp_dir(), 'tf_record.%d' % i)
writer = python_io.TFRecordWriter(filename)
for j in range(_NUM_ENTRIES):
record = compat.as_bytes('Record %d of file %d' % (j, i))
writer.write(record)
all_contents.append(record)
writer.close()
dataset = datasets.StreamingFilesDataset(
os.path.join(self.get_temp_dir(), 'tf_record*'), filetype='tfrecord')
with ops.device(self._worker_device):
iterator = dataset_ops.make_initializable_iterator(dataset)
self._sess.run(iterator.initializer)
get_next = iterator.get_next()
retrieved_values = []
for _ in range(4 * len(all_contents)):
retrieved_values.append(compat.as_bytes(self._sess.run(get_next)))
self.assertEqual(set(all_contents), set(retrieved_values))
def testTFRecordDatasetFromDataset(self):
filenames = []
all_contents = []
for i in range(_NUM_FILES):
filename = os.path.join(self.get_temp_dir(), 'tf_record.%d' % i)
filenames.append(filename)
writer = python_io.TFRecordWriter(filename)
for j in range(_NUM_ENTRIES):
record = compat.as_bytes('Record %d of file %d' % (j, i))
writer.write(record)
all_contents.append(record)
writer.close()
filenames = dataset_ops.Dataset.from_tensor_slices(filenames)
dataset = datasets.StreamingFilesDataset(filenames, filetype='tfrecord')
with ops.device(self._worker_device):
iterator = dataset_ops.make_initializable_iterator(dataset)
self._sess.run(iterator.initializer)
get_next = iterator.get_next()
retrieved_values = []
for _ in range(4 * len(all_contents)):
retrieved_values.append(compat.as_bytes(self._sess.run(get_next)))
self.assertEqual(set(all_contents), set(retrieved_values))
def testArbitraryReaderFunc(self):
def MakeRecord(i, j):
return compat.as_bytes('%04d-%04d' % (i, j))
record_bytes = len(MakeRecord(10, 200))
all_contents = []
for i in range(_NUM_FILES):
filename = os.path.join(self.get_temp_dir(), 'fixed_length.%d' % i)
with open(filename, 'wb') as f:
for j in range(_NUM_ENTRIES):
record = MakeRecord(i, j)
f.write(record)
all_contents.append(record)
def FixedLengthFile(filename):
return readers.FixedLengthRecordDataset(filename, record_bytes)
dataset = datasets.StreamingFilesDataset(
os.path.join(self.get_temp_dir(), 'fixed_length*'),
filetype=FixedLengthFile)
with ops.device(self._worker_device):
iterator = dataset_ops.make_initializable_iterator(dataset)
self._sess.run(iterator.initializer)
get_next = iterator.get_next()
retrieved_values = []
for _ in range(4 * len(all_contents)):
retrieved_values.append(compat.as_bytes(self._sess.run(get_next)))
self.assertEqual(set(all_contents), set(retrieved_values))
def testArbitraryReaderFuncFromDatasetGenerator(self):
def my_generator():
yield (1, [1] * 10)
def gen_dataset(dummy):
return dataset_ops.Dataset.from_generator(
my_generator, (dtypes.int64, dtypes.int64),
(tensor_shape.TensorShape([]), tensor_shape.TensorShape([10])))
dataset = datasets.StreamingFilesDataset(
dataset_ops.Dataset.range(10), filetype=gen_dataset)
with ops.device(self._worker_device):
iterator = dataset_ops.make_initializable_iterator(dataset)
self._sess.run(iterator.initializer)
get_next = iterator.get_next()
retrieved_values = self._sess.run(get_next)
self.assertIsInstance(retrieved_values, (list, tuple))
self.assertEqual(len(retrieved_values), 2)
self.assertEqual(retrieved_values[0], 1)
self.assertItemsEqual(retrieved_values[1], [1] * 10)
def testUnexpectedFiletypeString(self):
with self.assertRaises(ValueError):
datasets.StreamingFilesDataset(
os.path.join(self.get_temp_dir(), '*'), filetype='foo')
def testUnexpectedFiletypeType(self):
with self.assertRaises(ValueError):
datasets.StreamingFilesDataset(
os.path.join(self.get_temp_dir(), '*'), filetype=3)
def testUnexpectedFilesType(self):
with self.assertRaises(ValueError):
datasets.StreamingFilesDataset(123, filetype='tfrecord')
if __name__ == '__main__':
test.main()
| apache-2.0 |
rahuldhote/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 267 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes,
and undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <mr.phil.roth@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
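# Editor's note: illustrative follow-up, not part of the original example.
# The first panel's artifact disappears once n_clusters matches the three
# generated blobs.
def _demo_correct_k():
    y_fixed = KMeans(n_clusters=3, random_state=random_state).fit_predict(X)
    plt.figure()
    plt.scatter(X[:, 0], X[:, 1], c=y_fixed)
    plt.title("Correct Number of Blobs")
    plt.show()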
| bsd-3-clause |
bmcfee/pescador | examples/frameworks/keras_example.py | 1 | 6232 | # -*- coding: utf-8 -*-
"""
===============
A Keras Example
===============
An example of how to use Pescador with Keras.
Original Code source:
https://github.com/fchollet/keras/blob/master/examples/mnist_cnn.py
"""
##############################################
# Setup and Definitions
##############################################
from __future__ import print_function
import datetime
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import numpy as np
import pescador
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
##############################################
# Load and preprocess data
##############################################
def setup_data():
"""Load and shape data for training with Keras + Pescador.
Returns
-------
input_shape : tuple, len=3
Shape of each sample; adapts to channel configuration of Keras.
X_train, y_train : np.ndarrays
Images and labels for training.
X_test, y_test : np.ndarrays
Images and labels for test.
"""
# The data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
return input_shape, (x_train, y_train), (x_test, y_test)
##############################################
# Setup Keras model
##############################################
def build_model(input_shape):
"""Create a compiled Keras model.
Parameters
----------
input_shape : tuple, len=3
Shape of each image sample.
Returns
-------
model : keras.Model
Constructed model.
"""
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, kernel_size=(3, 3),
activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
return model
##############################################
# Define Data Sampler
##############################################
@pescador.streamable
def sampler(X, y):
'''A basic generator for sampling data.
Parameters
----------
X : np.ndarray, len=n_samples, ndim=4
Image data.
y : np.ndarray, len=n_samples, ndim=2
One-hot encoded class vectors.
Yields
------
data : dict
Single image sample, like {X: np.ndarray, y: np.ndarray}
'''
X = np.atleast_2d(X)
# y's are binary vectors, and should be of shape (10,) after this.
y = np.atleast_1d(y)
n = X.shape[0]
while True:
i = np.random.randint(0, n)
yield {'X': X[i], 'y': y[i]}
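# Editor's note: illustrative sketch, not part of the original example. A
# Streamer returned by the decorated sampler is assumed to be directly
# iterable (as the pescador documentation describes), so a few raw samples
# can be inspected before any batching.
def _peek_at_samples(streamer, n=3):
    from itertools import islice
    for sample in islice(streamer, n):
        print(sample['X'].shape, sample['y'].shape)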
##############################################
# Define a Custom Map Function
##############################################
@pescador.streamable
def additive_noise(stream, key='X', scale=1e-1):
'''Add noise to a data stream.
Parameters
----------
stream : iterable
A stream that yields data objects.
key : string, default='X'
Name of the field to add noise.
scale : float, default=0.1
Scale factor for gaussian noise.
Yields
------
data : dict
Updated data objects in the stream.
'''
for data in stream:
noise_shape = data[key].shape
noise = scale * np.random.randn(*noise_shape)
data[key] = data[key] + noise
yield data
##############################################
# Put it all together
##############################################
input_shape, (X_train, Y_train), (X_test, Y_test) = setup_data()
steps_per_epoch = len(X_train) // batch_size
# Create two streams from the same data, where one of the streams
# adds a small amount of Gaussian noise. You could easily perform
# other data augmentations using the same 'map' strategy.
stream = sampler(X_train, Y_train)
noisy_stream = additive_noise(stream, 'X')
# Multiplex the two streamers together.
mux = pescador.StochasticMux([stream, noisy_stream],
# Two streams, always active.
n_active=2,
# We want to sample from each stream infinitely.
rate=None)
# Buffer the stream into minibatches.
batches = pescador.buffer_stream(mux, batch_size)
model = build_model(input_shape)
try:
print("Start time: {}".format(datetime.datetime.now()))
model.fit_generator(
pescador.tuples(batches, 'X', 'y'),
steps_per_epoch=steps_per_epoch,
epochs=epochs,
verbose=1,
validation_data=(X_test, Y_test))
except KeyboardInterrupt:
print("Stopping early")
finally:
print("Finished: {}".format(datetime.datetime.now()))
scores = model.evaluate(X_test, Y_test, verbose=0)
for val, name in zip(scores, model.metrics_names):
print('Test {}: {:0.4f}'.format(name, val))
| isc |
matthew-tucker/mne-python | setup.py | 3 | 5039 | #! /usr/bin/env python
#
# Copyright (C) 2011-2014 Alexandre Gramfort
# <alexandre.gramfort@telecom-paristech.fr>
import os
from os import path as op
import setuptools # noqa; we are using a setuptools namespace
from numpy.distutils.core import setup
# get the version (don't import mne here, so dependencies are not needed)
version = None
with open(os.path.join('mne', '__init__.py'), 'r') as fid:
for line in (line.strip() for line in fid):
if line.startswith('__version__'):
version = line.split('=')[1].strip().strip('\'')
break
if version is None:
raise RuntimeError('Could not determine version')
descr = """MNE python project for MEG and EEG data analysis."""
DISTNAME = 'mne'
DESCRIPTION = descr
MAINTAINER = 'Alexandre Gramfort'
MAINTAINER_EMAIL = 'alexandre.gramfort@telecom-paristech.fr'
URL = 'http://martinos.org/mne'
LICENSE = 'BSD (3-clause)'
DOWNLOAD_URL = 'http://github.com/mne-tools/mne-python'
VERSION = version
if __name__ == "__main__":
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
setup(name=DISTNAME,
maintainer=MAINTAINER,
include_package_data=True,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=open('README.rst').read(),
zip_safe=False, # the package can run out of an .egg file
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'],
platforms='any',
packages=['mne', 'mne.tests',
'mne.beamformer', 'mne.beamformer.tests',
'mne.commands', 'mne.commands.tests',
'mne.connectivity', 'mne.connectivity.tests',
'mne.data',
'mne.datasets',
'mne.datasets.eegbci',
'mne.datasets._fake',
'mne.datasets.megsim',
'mne.datasets.sample',
'mne.datasets.somato',
'mne.datasets.spm_face',
'mne.datasets.testing',
'mne.datasets.tests',
'mne.externals',
'mne.io', 'mne.io.tests',
'mne.io.array', 'mne.io.array.tests',
'mne.io.brainvision', 'mne.io.brainvision.tests',
'mne.io.bti', 'mne.io.bti.tests',
'mne.io.edf', 'mne.io.edf.tests',
'mne.io.egi', 'mne.io.egi.tests',
'mne.io.fiff', 'mne.io.fiff.tests',
'mne.io.kit', 'mne.io.kit.tests',
'mne.forward', 'mne.forward.tests',
'mne.viz', 'mne.viz.tests',
'mne.gui', 'mne.gui.tests',
'mne.minimum_norm', 'mne.minimum_norm.tests',
'mne.inverse_sparse', 'mne.inverse_sparse.tests',
'mne.preprocessing', 'mne.preprocessing.tests',
'mne.simulation', 'mne.simulation.tests',
'mne.tests',
'mne.stats', 'mne.stats.tests',
'mne.time_frequency', 'mne.time_frequency.tests',
'mne.realtime', 'mne.realtime.tests',
'mne.decoding', 'mne.decoding.tests',
'mne.commands', 'mne.externals',
'mne.externals.tempita',
'mne.channels',
'mne.channels.tests'],
package_data={'mne': [op.join('data', '*.sel'),
op.join('data', 'icos.fif.gz'),
op.join('data', 'coil_def*.dat'),
op.join('data', 'helmets', '*.fif.gz'),
op.join('data', 'FreeSurferColorLUT.txt'),
op.join('channels', 'data', 'layouts', '*.lout'),
op.join('channels', 'data', 'layouts', '*.lay'),
op.join('channels', 'data', 'montages', '*.sfp'),
op.join('channels', 'data', 'montages', '*.txt'),
op.join('channels', 'data', 'montages', '*.elc'),
op.join('channels', 'data', 'neighbors', '*.mat'),
op.join('html', '*.js'),
op.join('html', '*.css')]},
scripts=['bin/mne'])
| bsd-3-clause |
vitaly-krugl/nupic | examples/opf/experiments/opfrunexperiment_test/checkpoints/base.py | 10 | 14780 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
import os
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
VERBOSITY = 1
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
'dataPath': None, # filled in by sub-experiment
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [(u'c1', 'first'), (u'c0', 'first')],
'hours': 1,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
      # at each step. 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : VERBOSITY,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
u'c0_timeOfDay': { 'fieldname': u'c0',
'name': u'c0_timeOfDay',
'timeOfDay': (21, 1),
'type': 'DateEncoder'},
u'c0_dayOfWeek': { 'dayOfWeek': (21, 1),
'fieldname': u'c0',
'name': u'c0_dayOfWeek',
'type': 'DateEncoder'},
u'c0_weekend': { 'fieldname': u'c0',
'name': u'c0_weekend',
'type': 'DateEncoder',
'weekend': 21},
u'c1': { 'clipInput': True,
'fieldname': u'c1',
'n': 100,
'name': u'c1',
'type': 'AdaptiveScalarEncoder',
'w': 21},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : VERBOSITY,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : VERBOSITY,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '24',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
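# For example, a predictAheadTime of 24 hours divided by the 1-hour
# aggregation period configured above yields predictionSteps == 24, which
# matches the default 'steps': '24' entry in clParams.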
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : { 'aggregation': config['aggregationInfo'],
u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
u'streams': [ { u'columns': [u'c0', u'c1'],
u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
u'source': 'file://%s' % (os.path.abspath(config['dataPath'])),
u'types': [u'datetime', u'float']}],
u'timeField': u'c0',
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : 4000,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'c1', u'predictionSteps': [24]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'c1', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 1000, 'steps': [24], 'errorMetric': 'altMAPE'}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| agpl-3.0 |
QuLogic/iris | lib/iris/experimental/stratify.py | 5 | 7991 | # (C) British Crown Copyright 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Routines for putting data on new strata (aka. isosurfaces), often in the
Z direction.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
from functools import partial
import six
import numpy as np
import stratify
from iris.coords import Coord, AuxCoord, DimCoord
from iris.cube import Cube
def _copy_coords_without_z_dim(src, tgt, z_dim):
"""
Helper function to copy across non z-dimenson coordinates between cubes.
Parameters
----------
src : :class:`~iris.cube.Cube`
Incoming cube containing the coordinates to be copied from.
tgt : :class:`~iris.cube.Cube`
Outgoing cube for the coordinates to be copied to.
z_dim : int
Dimension within the `src` cube that is the z-dimension.
This dimension will not be copied. For example, the incoming
z-dimension cube has model level_height, whilst the outgoing
z-dimension cube has pressure.
"""
# Copy across non z-dimension coordinates.
for coord in src.dim_coords:
[dim] = src.coord_dims(coord)
if dim != z_dim:
tgt.add_dim_coord(coord.copy(), dim)
for coord in src.aux_coords:
dims = src.coord_dims(coord)
if z_dim not in dims:
tgt.add_aux_coord(coord.copy(), dims)
for coord in src.derived_coords:
dims = src.coord_dims(coord)
if z_dim not in dims:
tgt.add_aux_coord(coord.copy(), dims)
def relevel(cube, src_levels, tgt_levels, axis=None, interpolator=None):
"""
Interpolate the cube onto the specified target levels, given the
source levels of the cube.
For example, suppose we have two datasets `P(i,j,k)` and `H(i,j,k)`
and we want `P(i,j,H)`. We call :func:`relevel` with `cube=P`,
`src_levels=H` and `tgt_levels` being an array of the values of `H`
we would like.
This routine is especially useful for computing isosurfaces of phenomenon
that are generally monotonic in the direction of interpolation, such as
height/pressure or salinity/depth.
Parameters
----------
cube : :class:`~iris.cube.Cube`
The phenomenon data to be re-levelled.
    src_levels : :class:`~iris.cube.Cube`, :class:`~iris.coords.Coord` or string
Describes the source levels of the `cube` that will be interpolated
over. The `src_levels` must be in the same system as the `tgt_levels`.
The dimensions of `src_levels` must be broadcastable to the dimensions
of the `cube`.
Note that, the coordinate name containing the source levels in the
`cube` may be provided.
tgt_levels : array-like
Describes the target levels of the `cube` to be interpolated to. The
`tgt_levels` must be in the same system as the `src_levels`. The
dimensions of the `tgt_levels` must be broadcastable to the dimensions
of the `cube`, except in the nominated axis of interpolation.
axis : int, :class:`~iris.coords.Coord` or string
The axis of interpolation. Defaults to the first dimension of the
`cube`, which is typically the z-dimension. Note that, the coordinate
name specifying the z-dimension of the `cube` may be provided.
interpolator : callable or None
The interpolator to use when computing the interpolation. The function
will be passed the following positional arguments::
(tgt-data, src-data, cube-data, axis-of-interpolation)
If the interpolator is None, :func:`stratify.interpolate` will be used
with linear interpolation and NaN extrapolation.
An example of constructing an alternative interpolation scheme:
from functools import partial
interpolator = partial(stratify.interpolate,
interpolation=stratify.INTERPOLATE_NEAREST,
extrapolation=stratify.EXTRAPOLATE_LINEAR)
"""
# Identify the z-coordinate within the phenomenon cube.
if axis is None:
axis = 0
if isinstance(axis, (six.string_types, Coord)):
[axis] = cube.coord_dims(axis)
# Get the source level data.
    if isinstance(src_levels, six.string_types):
        # Resolve a coordinate name to the coordinate itself, so that its
        # metadata is available when building the result coordinate below.
        src_levels = cube.coord(src_levels)
    if isinstance(src_levels, Coord):
        src_data = src_levels.points
else:
src_data = src_levels.data
# The dimensions of cube and src_data must be broadcastable.
try:
cube_data, src_data = np.broadcast_arrays(cube.data, src_data)
except ValueError:
emsg = ('Cannot broadcast the cube and src_levels with '
'shapes {} and {}.')
raise ValueError(emsg.format(cube.shape, src_data.shape))
tgt_levels = np.asarray(tgt_levels)
tgt_aux_dims = axis
if tgt_levels.ndim != 1:
# The dimensions of tgt_levels must be broadcastable to cube
# in everything but the interpolation axis - otherwise raise
# an exception.
dim_delta = cube_data.ndim - tgt_levels.ndim
        # The axis is relative to the cube. Calculate the axis of
        # interpolation relative to the tgt_levels.
tgt_axis = axis - dim_delta
# Calculate the cube shape without the axis of interpolation.
data_shape = list(cube_data.shape)
data_shape.pop(axis)
# Calculate the tgt_levels shape without the axis of interpolation.
target_shape = list(tgt_levels.shape)
target_shape.pop(tgt_axis)
# Now ensure that the shapes are broadcastable.
try:
np.broadcast_arrays(np.empty(data_shape), np.empty(target_shape))
except ValueError:
emsg = ('Cannot broadcast the cube and tgt_levels with '
'shapes {} and {}, whilst ignoring axis of interpolation.')
raise ValueError(emsg.format(cube_data.shape, tgt_levels.shape))
# Calculate the dimensions over the cube that the tgt_levels span.
tgt_aux_dims = list(range(cube_data.ndim))[dim_delta:]
if interpolator is None:
# Use the default stratify interpolator.
interpolator = partial(stratify.interpolate,
interpolation='linear', extrapolation='nan')
# Now perform the interpolation.
new_data = interpolator(tgt_levels, src_data, cube_data, axis=axis)
# Create a result cube with the correct shape and metadata.
result = Cube(new_data, **cube.copy().metadata._asdict())
# Copy across non z-dimension coordinates from the source cube
# to the result cube.
_copy_coords_without_z_dim(cube, result, axis)
kwargs = dict(standard_name=src_levels.standard_name,
long_name=src_levels.long_name,
var_name=src_levels.var_name,
units=src_levels.units,
attributes=src_levels.attributes)
# Add our new interpolated coordinate to the result cube.
try:
coord = DimCoord(tgt_levels, **kwargs)
result.add_dim_coord(coord, axis)
except ValueError:
# Attach the data to the trailing dimensions.
coord = AuxCoord(tgt_levels, **kwargs)
result.add_aux_coord(coord, tgt_aux_dims)
return result
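# Editor's note: illustrative sketch, not part of the original module. A
# minimal cube of pressure carrying a multidimensional height coordinate is
# re-levelled onto three chosen heights; the names and values here are
# invented for the demonstration.
def _demo_relevel():
    import numpy as np
    from iris.cube import Cube
    from iris.coords import AuxCoord
    shape = (4, 2, 3)
    pressure = Cube(np.linspace(1000., 500., 4).reshape(4, 1, 1) *
                    np.ones(shape), long_name='pressure', units='hPa')
    height = AuxCoord(np.linspace(0., 3000., 4).reshape(4, 1, 1) *
                      np.ones(shape), long_name='height', units='m')
    pressure.add_aux_coord(height, (0, 1, 2))
    print(relevel(pressure, pressure.coord('height'), [500., 1500., 2500.]))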
| gpl-3.0 |
rahuldhote/scikit-learn | sklearn/neighbors/base.py | 114 | 29783 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
        if ``weights`` is None or 'uniform', None is returned instead
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
            # A tree approach is better for a small number of neighbors,
            # and KDTree is generally faster when available
if (self.n_neighbors is None
or self.n_neighbors < self._fit_X.shape[0] // 2):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
return self
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns distance
Parameters
----------
X : array-like, last dimension same as that of fit data, optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = self._tree.query(X, n_neighbors,
return_distance=return_distance)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
            # Corner case: when the number of duplicates is more
            # than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, last dimension same as that of fit data, optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([1., 1., 1.])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
        [ 1.5  0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
            # If the query data is the same as the indexed data, we would like
            # to ignore the first nearest neighbor of every sample, i.e.
            # the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
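    # Note on the object-dtype return described in the docstring above: each
    # query point may have a different number of neighbors, so the result is
    # a ragged array of 1D arrays. An illustrative sketch (``neigh`` is the
    # fitted estimator from the docstring example):
    #
    #   dist, ind = neigh.radius_neighbors([[1., 1., 1.], [0., 0., 0.]])
    #   [len(i) for i in ind]   # per-query neighbor counts; may differ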
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to points at a distance lower than the
        radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
        array([[ 1.,  0.,  1.],
               [ 0.,  1.,  0.],
               [ 1.,  0.,  1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
"""
return self._fit(X)
| bsd-3-clause |
elvandy/nltools | examples/01_DataOperations/plot_download.py | 3 | 5125 | """
Basic Data Operations
=====================
A simple example showing how to download a dataset from neurovault and perform
basic data operations. The bulk of the nltools toolbox is built around the
Brain_Data() class. This class represents imaging data as a vectorized
features by observations matrix. Each image is an observation and each voxel
is a feature. The concept behind the class is to have a similar feel to a pandas
dataframe, which means that it should feel intuitive to manipulate the data.
"""
#########################################################################
# Download pain dataset from neurovault
# ---------------------------------------------------
#
# Here we fetch the pain dataset used in `Chang et al., 2015 <http://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.1002180>`_
# from `neurovault <http://neurovault.org/collections/504/>`_. In this dataset
# there are 28 subjects with 3 separate beta images reflecting varying intensities
# of thermal pain (i.e., high, medium, low). The data will be downloaded to ~/nilearn_data,
# and automatically loaded as a Brain_Data() instance. The image metadata will be stored in data.X.
from nltools.datasets import fetch_pain
data = fetch_pain()
#########################################################################
# Load files
# ---------------------------------------------------
#
# Nifti images can be easily loaded simply by passing a string to a nifti file.
# Many images can be loaded together by passing a list of nifti files.
# For example, on linux or OSX systems, the downloads from fetch_pain() will be
# stored in ~/nilearn_data. We will load subject 1's data.
# NOTES: Need to figure out how to get path to data working on rtd server
# from nltools.data import Brain_Data
# import glob
#
# sub1 = Brain_Data(glob.glob('~/nilearn_data/chang2015_pain/Pain_Subject_1*.nii.gz'))
#########################################################################
# Basic Brain_Data() Operations
# ---------------------------------------------------------
#
# Here are a few quick basic data operations.
# Find number of images in Brain_Data() instance
print(len(data))
#########################################################################
# Find the dimensions of the data. images x voxels
print(data.shape())
#########################################################################
# We can use any type of indexing to slice the data such as integers, lists
# of integers, or boolean.
print(data[[1,6,2]])
#########################################################################
# Calculate the mean for every voxel over images
data.mean()
#########################################################################
# Calculate the standard deviation for every voxel over images
data.std()
#########################################################################
# Methods can be chained. Here we get the shape of the mean.
print(data.mean().shape())
#########################################################################
# Brain_Data instances can be added and subtracted
new = data[1]+data[2]
#########################################################################
# Brain_Data instances can be manipulated with basic arithmetic operations
# Here we add 10 to every voxel and scale by 2
data2 = (data+10)*2
#########################################################################
# Brain_Data instances can be copied
new = data.copy()
#########################################################################
# Brain_Data instances can be easily converted to nibabel instances, which
# store the data in a 3D/4D matrix. This is useful for interfacing with other
# python toolboxes such as `nilearn <http://nilearn.github.io/>`_
data.to_nifti()
#########################################################################
# Brain_Data instances can be concatenated using the append method
new = new.append(data[4])
#########################################################################
# Any Brain_Data object can be written out to a nifti file
data.write('Tmp_Data.nii.gz')
#########################################################################
# Images within a Brain_Data() instance are iterable. Here we use a list
# comprehension to calculate the overall mean across all voxels within an
# image.
[x.mean() for x in data]
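#########################################################################
# (Aside: conceptually this matches the row-wise mean of the underlying
# matrix. A plain-numpy analogy, not the actual implementation:
#
# row_means = [row.mean() for row in arr]    # arr: (n_images, n_voxels)
# np.allclose(row_means, arr.mean(axis=1))   # -> True
# )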
#########################################################################
# Basic Brain_Data() Plotting
# ---------------------------------------------------------
#
# There are multiple ways to plot data. First, Brain_Data() instances can be
# converted to a nibabel instance and plotted using any plot method such as
# nilearn.
from nilearn.plotting import plot_glass_brain
plot_glass_brain(data.mean().to_nifti())
#########################################################################
# There is also a fast montage plotting method. Here we plot the average image
# it will render a separate plot for each image. There is a 'limit' flag
# which allows you to specify the maximum number of images to display.
data.mean().plot()
| mit |
avati/samba | lib/dnspython/dns/rrset.py | 98 | 5895 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS RRsets (an RRset is a named rdataset)"""
import dns.name
import dns.rdataset
import dns.rdataclass
import dns.renderer
class RRset(dns.rdataset.Rdataset):
"""A DNS RRset (named rdataset).
RRset inherits from Rdataset, and RRsets can be treated as
Rdatasets in most cases. There are, however, a few notable
exceptions. RRsets have different to_wire() and to_text() method
arguments, reflecting the fact that RRsets always have an owner
name.
"""
__slots__ = ['name', 'deleting']
def __init__(self, name, rdclass, rdtype, covers=dns.rdatatype.NONE,
deleting=None):
"""Create a new RRset."""
super(RRset, self).__init__(rdclass, rdtype, covers)
self.name = name
self.deleting = deleting
def _clone(self):
obj = super(RRset, self)._clone()
obj.name = self.name
obj.deleting = self.deleting
return obj
def __repr__(self):
if self.covers == 0:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
        if self.deleting is not None:
dtext = ' delete=' + dns.rdataclass.to_text(self.deleting)
else:
dtext = ''
return '<DNS ' + str(self.name) + ' ' + \
dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + dtext + ' RRset>'
def __str__(self):
return self.to_text()
def __eq__(self, other):
"""Two RRsets are equal if they have the same name and the same
rdataset
@rtype: bool"""
if not isinstance(other, RRset):
return False
if self.name != other.name:
return False
return super(RRset, self).__eq__(other)
def match(self, name, rdclass, rdtype, covers, deleting=None):
"""Returns True if this rrset matches the specified class, type,
covers, and deletion state."""
if not super(RRset, self).match(rdclass, rdtype, covers):
return False
if self.name != name or self.deleting != deleting:
return False
return True
def to_text(self, origin=None, relativize=True, **kw):
"""Convert the RRset into DNS master file format.
@see: L{dns.name.Name.choose_relativity} for more information
on how I{origin} and I{relativize} determine the way names
are emitted.
Any additional keyword arguments are passed on to the rdata
to_text() method.
@param origin: The origin for relative names, or None.
@type origin: dns.name.Name object
        @param relativize: True if names should be relativized
@type relativize: bool"""
return super(RRset, self).to_text(self.name, origin, relativize,
self.deleting, **kw)
def to_wire(self, file, compress=None, origin=None, **kw):
"""Convert the RRset to wire format."""
return super(RRset, self).to_wire(self.name, file, compress, origin,
self.deleting, **kw)
def to_rdataset(self):
"""Convert an RRset into an Rdataset.
@rtype: dns.rdataset.Rdataset object
"""
return dns.rdataset.from_rdata_list(self.ttl, list(self))
def from_text_list(name, ttl, rdclass, rdtype, text_rdatas):
"""Create an RRset with the specified name, TTL, class, and type, and with
the specified list of rdatas in text format.
@rtype: dns.rrset.RRset object
"""
if isinstance(name, (str, unicode)):
name = dns.name.from_text(name, None)
if isinstance(rdclass, (str, unicode)):
rdclass = dns.rdataclass.from_text(rdclass)
if isinstance(rdtype, (str, unicode)):
rdtype = dns.rdatatype.from_text(rdtype)
r = RRset(name, rdclass, rdtype)
r.update_ttl(ttl)
for t in text_rdatas:
rd = dns.rdata.from_text(r.rdclass, r.rdtype, t)
r.add(rd)
return r
def from_text(name, ttl, rdclass, rdtype, *text_rdatas):
"""Create an RRset with the specified name, TTL, class, and type and with
the specified rdatas in text format.
@rtype: dns.rrset.RRset object
"""
return from_text_list(name, ttl, rdclass, rdtype, text_rdatas)
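# Illustrative usage of the constructors above (the owner name and
# addresses are example data, not part of this module):
#
#   rrs = from_text('www.example.', 300, 'IN', 'A', '10.0.0.1', '10.0.0.2')
#   len(rrs)     # -> 2 rdatas in one RRset
#   print(rrs)   # rendered via to_text() in master-file format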
def from_rdata_list(name, ttl, rdatas):
"""Create an RRset with the specified name and TTL, and with
the specified list of rdata objects.
@rtype: dns.rrset.RRset object
"""
if isinstance(name, (str, unicode)):
name = dns.name.from_text(name, None)
if len(rdatas) == 0:
raise ValueError("rdata list must not be empty")
r = None
for rd in rdatas:
if r is None:
r = RRset(name, rd.rdclass, rd.rdtype)
r.update_ttl(ttl)
r.add(rd)
return r
def from_rdata(name, ttl, *rdatas):
"""Create an RRset with the specified name and TTL, and with
the specified rdata objects.
@rtype: dns.rrset.RRset object
"""
return from_rdata_list(name, ttl, rdatas)
| gpl-3.0 |
daiqing2009/CLS_chatlog | CN_dfidf.py | 1 | 2899 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 28 17:40:42 2014
@author: david.dai
"""
import datetime
import sqlite3
import os
import codecs
import re
import jieba.posseg as pseg
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
def persist_chatlog(filename, conn):
with conn:
cur = conn.cursor()
f = codecs.open(filename,'r+','gbk')
messages = []
for line in f.readlines():
            # the original called line.startwith() with no prefix; assume a
            # chat line is one that begins with a 4-digit year timestamp
            if re.match(u'^\\d{4}年', line):
                msg = generateMsg(line)
                print msg
                messages.append((msg.msgTime, msg.who, msg.said, False))
        cur.executemany('insert into messages(msg_time,who,said,is_confirmed) values (?,?,?,?)', messages)
def generateMsg(line):
s = line.split()
msgTime = datetime.datetime.strptime(s[0],"%Y年%m月%d日%X")
n = re.search('^(gelnic简妮:)?.+?:'.decode('utf-8','ignore'),s[1])
who = n.group(0)
said = s[1][len(who):]
return Message(msgTime,who,said)
def JB_tokenizer(sentence):
    words = pseg.cut(sentence)
    # extract nouns & verbs only
    return [w.word for w in words if w.flag in ['n', 'v']]
# TODO: 1. training 2. dataset testing
def get_cls_model(sentences, categories):
#extract features via jieba, only consider noun & verb
#building pipeline
text_clf = Pipeline([('vect', CountVectorizer(tokenizer=JB_tokenizer )),
('tfidf', TfidfTransformer()),
('clf', MultinomialNB()),
])
#return trained models
    text_clf = text_clf.fit(sentences, categories)
return text_clf
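# A hypothetical usage sketch for the pipeline above (the sentences and
# labels are made-up illustrations, not data from this project):
#
#   train_x = [u'今天可以发货吗', u'我要申请退款']
#   train_y = ['shipping', 'refund']
#   model = get_cls_model(train_x, train_y)
#   print model.predict([u'什么时候发货'])   # hopefully -> ['shipping']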
#copy of https://docs.python.org/2/library/sqlite3.html
class Message(object):
def __init__(self,msgTime,who,said):
self.msgTime, self.who, self.said = msgTime, who, said
def __repr__(self):
return "on %s, %s said: %s" % (self.msgTime.strftime("%Y-%m-%d %X"), self.who, self.said)
# get the list of files (this directory holds the 100 documents)
def getFilelist(path):
filelist = []
files = os.listdir(path)
for f in files:
if(f[0] == '.'):
pass
else:
filelist.append(f)
return filelist,path
if __name__ == "__main__":
conn = sqlite3.connect("CN_CLS")
    with conn:
        cur = conn.cursor()
        cur.execute("create table if not exists messages(id INTEGER PRIMARY KEY AUTOINCREMENT, msg_time datetime, who TEXT, said TEXT, category TEXT, is_confirmed BOOLEAN)")
    # read the documents from the chatlog directory
    filelist, path = getFilelist("./chatlog")
    for filename in filelist:
        persist_chatlog(os.path.join(path, filename), conn)
    # simulate assigning a category to each message
    # build the model; `sentences` and `categories` are assumed to be loaded
    # from the messages table first (left unfinished in the original TODO)
    clsModel = get_cls_model(sentences, categories)
    # evaluate the model on the training sentences
    clsModel.predict(sentences)
| mit |
rahuldhote/scikit-learn | sklearn/tests/test_base.py | 215 | 7045 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
| bsd-3-clause |
rahuldhote/scikit-learn | sklearn/svm/base.py | 155 | 36018 | from __future__ import print_function
import numpy as np
import scipy.sparse as sp
import warnings
from abc import ABCMeta, abstractmethod
from . import libsvm, liblinear
from . import libsvm_sparse
from ..base import BaseEstimator, ClassifierMixin, ChangedBehaviorWarning
from ..preprocessing import LabelEncoder
from ..multiclass import _ovr_decision_function
from ..utils import check_array, check_random_state, column_or_1d
from ..utils import ConvergenceWarning, compute_class_weight, deprecated
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
LIBSVM_IMPL = ['c_svc', 'nu_svc', 'one_class', 'epsilon_svr', 'nu_svr']
def _one_vs_one_coef(dual_coef, n_support, support_vectors):
"""Generate primal coefficients from dual coefficients
for the one-vs-one multi class LibSVM in the case
of a linear kernel."""
# get 1vs1 weights for all n*(n-1) classifiers.
# this is somewhat messy.
# shape of dual_coef_ is nSV * (n_classes -1)
# see docs for details
n_class = dual_coef.shape[0] + 1
# XXX we could do preallocation of coef but
# would have to take care in the sparse case
coef = []
sv_locs = np.cumsum(np.hstack([[0], n_support]))
for class1 in range(n_class):
# SVs for class1:
sv1 = support_vectors[sv_locs[class1]:sv_locs[class1 + 1], :]
for class2 in range(class1 + 1, n_class):
# SVs for class1:
sv2 = support_vectors[sv_locs[class2]:sv_locs[class2 + 1], :]
# dual coef for class1 SVs:
alpha1 = dual_coef[class2 - 1, sv_locs[class1]:sv_locs[class1 + 1]]
# dual coef for class2 SVs:
alpha2 = dual_coef[class1, sv_locs[class2]:sv_locs[class2 + 1]]
# build weight for class1 vs class2
coef.append(safe_sparse_dot(alpha1, sv1)
+ safe_sparse_dot(alpha2, sv2))
return coef
class BaseLibSVM(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for estimators that use libsvm as backing library
This implements support vector machine classification and regression.
Parameter documentation is in the derived `SVC` class.
"""
# The order of these must match the integer values in LibSVM.
# XXX These are actually the same in the dense case. Need to factor
# this out.
_sparse_kernels = ["linear", "poly", "rbf", "sigmoid", "precomputed"]
@abstractmethod
def __init__(self, impl, kernel, degree, gamma, coef0,
tol, C, nu, epsilon, shrinking, probability, cache_size,
class_weight, verbose, max_iter, random_state):
if impl not in LIBSVM_IMPL: # pragma: no cover
raise ValueError("impl should be one of %s, %s was given" % (
LIBSVM_IMPL, impl))
# FIXME Remove gamma=0.0 support in 0.18
if gamma == 0:
msg = ("gamma=%s has been deprecated in favor of "
"gamma='%s' as of 0.17. Backward compatibility"
" for gamma=%s will be removed in %s")
invalid_gamma = 0.0
warnings.warn(msg % (invalid_gamma, "auto", invalid_gamma, "0.18"),
DeprecationWarning)
self._impl = impl
self.kernel = kernel
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self.tol = tol
self.C = C
self.nu = nu
self.epsilon = epsilon
self.shrinking = shrinking
self.probability = probability
self.cache_size = cache_size
self.class_weight = class_weight
self.verbose = verbose
self.max_iter = max_iter
self.random_state = random_state
@property
def _pairwise(self):
# Used by cross_val_score.
kernel = self.kernel
return kernel == "precomputed" or callable(kernel)
def fit(self, X, y, sample_weight=None):
"""Fit the SVM model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
For kernel="precomputed", the expected shape of X is
(n_samples, n_samples).
y : array-like, shape (n_samples,)
Target values (class labels in classification, real numbers in
regression)
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
------
If X and y are not C-ordered and contiguous arrays of np.float64 and
X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
If X is a dense array, then the other methods will not support sparse
matrices as input.
"""
rnd = check_random_state(self.random_state)
sparse = sp.isspmatrix(X)
if sparse and self.kernel == "precomputed":
raise TypeError("Sparse precomputed kernels are not supported.")
self._sparse = sparse and not callable(self.kernel)
X = check_array(X, accept_sparse='csr', dtype=np.float64, order='C')
y = self._validate_targets(y)
sample_weight = np.asarray([]
if sample_weight is None
else sample_weight, dtype=np.float64)
solver_type = LIBSVM_IMPL.index(self._impl)
# input validation
if solver_type != 2 and X.shape[0] != y.shape[0]:
raise ValueError("X and y have incompatible shapes.\n" +
"X has %s samples, but y has %s." %
(X.shape[0], y.shape[0]))
if self.kernel == "precomputed" and X.shape[0] != X.shape[1]:
raise ValueError("X.shape[0] should be equal to X.shape[1]")
if sample_weight.shape[0] > 0 and sample_weight.shape[0] != X.shape[0]:
raise ValueError("sample_weight and X have incompatible shapes: "
"%r vs %r\n"
"Note: Sparse matrices cannot be indexed w/"
"boolean masks (use `indices=True` in CV)."
% (sample_weight.shape, X.shape))
# FIXME remove (self.gamma == 0) in 0.18
if (self.kernel in ['poly', 'rbf']) and ((self.gamma == 0) or
(self.gamma == 'auto')):
# if custom gamma is not provided ...
self._gamma = 1.0 / X.shape[1]
elif self.gamma == 'auto':
self._gamma = 0.0
else:
self._gamma = self.gamma
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
fit = self._sparse_fit if self._sparse else self._dense_fit
if self.verbose: # pragma: no cover
print('[LibSVM]', end='')
seed = rnd.randint(np.iinfo('i').max)
fit(X, y, sample_weight, solver_type, kernel, random_seed=seed)
# see comment on the other call to np.iinfo in this file
self.shape_fit_ = X.shape
# In binary case, we need to flip the sign of coef, intercept and
# decision function. Use self._intercept_ and self._dual_coef_ internally.
self._intercept_ = self.intercept_.copy()
self._dual_coef_ = self.dual_coef_
if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2:
self.intercept_ *= -1
self.dual_coef_ = -self.dual_coef_
return self
def _validate_targets(self, y):
"""Validation of y and class_weight.
Default implementation for SVR and one-class; overridden in BaseSVC.
"""
# XXX this is ugly.
# Regression models should not have a class_weight_ attribute.
self.class_weight_ = np.empty(0)
return column_or_1d(y, warn=True).astype(np.float64)
def _warn_from_fit_status(self):
assert self.fit_status_ in (0, 1)
if self.fit_status_ == 1:
warnings.warn('Solver terminated early (max_iter=%i).'
' Consider pre-processing your data with'
' StandardScaler or MinMaxScaler.'
% self.max_iter, ConvergenceWarning)
def _dense_fit(self, X, y, sample_weight, solver_type, kernel,
random_seed):
if callable(self.kernel):
# you must store a reference to X to compute the kernel in predict
# TODO: add keyword copy to copy on demand
self.__Xfit = X
X = self._compute_kernel(X)
if X.shape[0] != X.shape[1]:
raise ValueError("X.shape[0] should be equal to X.shape[1]")
libsvm.set_verbosity_wrap(self.verbose)
# we don't pass **self.get_params() to allow subclasses to
# add other parameters to __init__
self.support_, self.support_vectors_, self.n_support_, \
self.dual_coef_, self.intercept_, self.probA_, \
self.probB_, self.fit_status_ = libsvm.fit(
X, y,
svm_type=solver_type, sample_weight=sample_weight,
class_weight=self.class_weight_, kernel=kernel, C=self.C,
nu=self.nu, probability=self.probability, degree=self.degree,
shrinking=self.shrinking, tol=self.tol,
cache_size=self.cache_size, coef0=self.coef0,
gamma=self._gamma, epsilon=self.epsilon,
max_iter=self.max_iter, random_seed=random_seed)
self._warn_from_fit_status()
def _sparse_fit(self, X, y, sample_weight, solver_type, kernel,
random_seed):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
X.sort_indices()
kernel_type = self._sparse_kernels.index(kernel)
libsvm_sparse.set_verbosity_wrap(self.verbose)
self.support_, self.support_vectors_, dual_coef_data, \
self.intercept_, self.n_support_, \
self.probA_, self.probB_, self.fit_status_ = \
libsvm_sparse.libsvm_sparse_train(
X.shape[1], X.data, X.indices, X.indptr, y, solver_type,
kernel_type, self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
sample_weight, self.nu, self.cache_size, self.epsilon,
int(self.shrinking), int(self.probability), self.max_iter,
random_seed)
self._warn_from_fit_status()
if hasattr(self, "classes_"):
n_class = len(self.classes_) - 1
else: # regression
n_class = 1
n_SV = self.support_vectors_.shape[0]
dual_coef_indices = np.tile(np.arange(n_SV), n_class)
dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,
dual_coef_indices.size / n_class)
self.dual_coef_ = sp.csr_matrix(
(dual_coef_data, dual_coef_indices, dual_coef_indptr),
(n_class, n_SV))
def predict(self, X):
"""Perform regression on samples in X.
        For a one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
y_pred : array, shape (n_samples,)
"""
X = self._validate_for_predict(X)
predict = self._sparse_predict if self._sparse else self._dense_predict
return predict(X)
def _dense_predict(self, X):
n_samples, n_features = X.shape
X = self._compute_kernel(X)
if X.ndim == 1:
X = check_array(X, order='C')
kernel = self.kernel
if callable(self.kernel):
kernel = 'precomputed'
if X.shape[1] != self.shape_fit_[0]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of samples at training time" %
(X.shape[1], self.shape_fit_[0]))
svm_type = LIBSVM_IMPL.index(self._impl)
return libsvm.predict(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_, svm_type=svm_type, kernel=kernel,
degree=self.degree, coef0=self.coef0, gamma=self._gamma,
cache_size=self.cache_size)
def _sparse_predict(self, X):
# Precondition: X is a csr_matrix of dtype np.float64.
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
C = 0.0 # C is not useful here
return libsvm_sparse.libsvm_sparse_predict(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _compute_kernel(self, X):
"""Return the data transformed by a callable kernel"""
if callable(self.kernel):
# in the case of precomputed kernel given as a function, we
# have to compute explicitly the kernel matrix
kernel = self.kernel(X, self.__Xfit)
if sp.issparse(kernel):
kernel = kernel.toarray()
X = np.asarray(kernel, dtype=np.float64, order='C')
return X
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train].
Returns
-------
X : array-like, shape (n_samples, n_class * (n_class-1) / 2)
Returns the decision function of the sample for each class
in the model.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples, n_class * (n_class-1) / 2)
Returns the decision function of the sample for each class
in the model.
"""
# NOTE: _validate_for_predict contains check for is_fitted
# hence must be placed before any other attributes are used.
X = self._validate_for_predict(X)
X = self._compute_kernel(X)
if self._sparse:
dec_func = self._sparse_decision_function(X)
else:
dec_func = self._dense_decision_function(X)
# In binary case, we need to flip the sign of coef, intercept and
# decision function.
if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2:
return -dec_func.ravel()
return dec_func
def _dense_decision_function(self, X):
X = check_array(X, dtype=np.float64, order="C")
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
return libsvm.decision_function(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_,
svm_type=LIBSVM_IMPL.index(self._impl),
kernel=kernel, degree=self.degree, cache_size=self.cache_size,
coef0=self.coef0, gamma=self._gamma)
def _sparse_decision_function(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
kernel = self.kernel
if hasattr(kernel, '__call__'):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_decision_function(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _validate_for_predict(self, X):
check_is_fitted(self, 'support_')
X = check_array(X, accept_sparse='csr', dtype=np.float64, order="C")
if self._sparse and not sp.isspmatrix(X):
X = sp.csr_matrix(X)
if self._sparse:
X.sort_indices()
if sp.issparse(X) and not self._sparse and not callable(self.kernel):
raise ValueError(
"cannot use sparse input in %r trained on dense data"
% type(self).__name__)
n_samples, n_features = X.shape
if self.kernel == "precomputed":
if X.shape[1] != self.shape_fit_[0]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of samples at training time" %
(X.shape[1], self.shape_fit_[0]))
elif n_features != self.shape_fit_[1]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of features at training time" %
(n_features, self.shape_fit_[1]))
return X
@property
def coef_(self):
if self.kernel != 'linear':
raise ValueError('coef_ is only available when using a '
'linear kernel')
coef = self._get_coef()
# coef_ being a read-only property, it's better to mark the value as
# immutable to avoid hiding potential bugs for the unsuspecting user.
if sp.issparse(coef):
# sparse matrix do not have global flags
coef.data.flags.writeable = False
else:
# regular dense array
coef.flags.writeable = False
return coef
def _get_coef(self):
return safe_sparse_dot(self._dual_coef_, self.support_vectors_)
class BaseSVC(six.with_metaclass(ABCMeta, BaseLibSVM, ClassifierMixin)):
"""ABC for LibSVM-based classifiers."""
@abstractmethod
def __init__(self, impl, kernel, degree, gamma, coef0, tol, C, nu,
shrinking, probability, cache_size, class_weight, verbose,
max_iter, decision_function_shape, random_state):
self.decision_function_shape = decision_function_shape
super(BaseSVC, self).__init__(
impl=impl, kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
random_state=random_state)
def _validate_targets(self, y):
y_ = column_or_1d(y, warn=True)
cls, y = np.unique(y_, return_inverse=True)
self.class_weight_ = compute_class_weight(self.class_weight, cls, y_)
if len(cls) < 2:
raise ValueError(
"The number of classes has to be greater than one; got %d"
% len(cls))
self.classes_ = cls
return np.asarray(y, dtype=np.float64, order='C')
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples, n_classes * (n_classes-1) / 2)
Returns the decision function of the sample for each class
in the model.
If decision_function_shape='ovr', the shape is (n_samples,
n_classes)
"""
dec = self._decision_function(X)
if self.decision_function_shape is None and len(self.classes_) > 2:
warnings.warn("The decision_function_shape default value will "
"change from 'ovo' to 'ovr' in 0.18. This will change "
"the shape of the decision function returned by "
"SVC.", ChangedBehaviorWarning)
if self.decision_function_shape == 'ovr':
return _ovr_decision_function(dec < 0, dec, len(self.classes_))
return dec
def predict(self, X):
"""Perform classification on samples in X.
        For a one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
y_pred : array, shape (n_samples,)
Class labels for samples in X.
"""
y = super(BaseSVC, self).predict(X)
return self.classes_.take(np.asarray(y, dtype=np.intp))
# Hacky way of getting predict_proba to raise an AttributeError when
# probability=False using properties. Do not use this in new code; when
# probabilities are not available depending on a setting, introduce two
# estimators.
def _check_proba(self):
if not self.probability:
raise AttributeError("predict_proba is not available when "
" probability=False")
if self._impl not in ('c_svc', 'nu_svc'):
raise AttributeError("predict_proba only implemented for SVC"
" and NuSVC")
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
        The model needs to have probability information computed at training
time: fit with attribute `probability` set to True.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the probability of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
X = self._validate_for_predict(X)
if self.probA_.size == 0 or self.probB_.size == 0:
raise NotFittedError("predict_proba is not available when fitted "
"with probability=False")
pred_proba = (self._sparse_predict_proba
if self._sparse else self._dense_predict_proba)
return pred_proba(X)
@property
def predict_log_proba(self):
"""Compute log probabilities of possible outcomes for samples in X.
        The model needs to have probability information computed at training
time: fit with attribute `probability` set to True.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probabilities of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
def _dense_predict_proba(self, X):
X = self._compute_kernel(X)
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
svm_type = LIBSVM_IMPL.index(self._impl)
pprob = libsvm.predict_proba(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_,
svm_type=svm_type, kernel=kernel, degree=self.degree,
cache_size=self.cache_size, coef0=self.coef0, gamma=self._gamma)
return pprob
def _sparse_predict_proba(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_predict_proba(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _get_coef(self):
if self.dual_coef_.shape[0] == 1:
# binary classifier
coef = safe_sparse_dot(self.dual_coef_, self.support_vectors_)
else:
# 1vs1 classifier
coef = _one_vs_one_coef(self.dual_coef_, self.n_support_,
self.support_vectors_)
if sp.issparse(coef[0]):
coef = sp.vstack(coef).tocsr()
else:
coef = np.vstack(coef)
return coef
def _get_liblinear_solver_type(multi_class, penalty, loss, dual):
"""Find the liblinear magic number for the solver.
This number depends on the values of the following attributes:
- multi_class
- penalty
- loss
- dual
The same number is also internally used by LibLinear to determine
which solver to use.
"""
# nested dicts containing level 1: available loss functions,
    # level 2: available penalties for the given loss function,
    # level 3: whether the dual solver is available for the specified
# combination of loss function and penalty
_solver_type_dict = {
'logistic_regression': {
'l1': {False: 6},
'l2': {False: 0, True: 7}},
'hinge': {
'l2': {True: 3}},
'squared_hinge': {
'l1': {False: 5},
'l2': {False: 2, True: 1}},
'epsilon_insensitive': {
'l2': {True: 13}},
'squared_epsilon_insensitive': {
'l2': {False: 11, True: 12}},
'crammer_singer': 4
}
if multi_class == 'crammer_singer':
return _solver_type_dict[multi_class]
elif multi_class != 'ovr':
raise ValueError("`multi_class` must be one of `ovr`, "
"`crammer_singer`, got %r" % multi_class)
# FIXME loss.lower() --> loss in 0.18
_solver_pen = _solver_type_dict.get(loss.lower(), None)
if _solver_pen is None:
error_string = ("loss='%s' is not supported" % loss)
else:
        # FIXME penalty.lower() --> penalty in 0.18
_solver_dual = _solver_pen.get(penalty.lower(), None)
if _solver_dual is None:
error_string = ("The combination of penalty='%s' "
"and loss='%s' is not supported"
% (penalty, loss))
else:
solver_num = _solver_dual.get(dual, None)
if solver_num is None:
error_string = ("The combination of penalty='%s' and "
"loss='%s' are not supported when dual=%s"
% (penalty, loss, dual))
else:
return solver_num
raise ValueError('Unsupported set of arguments: %s, '
'Parameters: penalty=%r, loss=%r, dual=%r'
% (error_string, penalty, loss, dual))
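# For example (reading the table above): multi_class='ovr', penalty='l2',
# loss='squared_hinge', dual=True maps to LibLinear solver number 1
# (L2-regularized L2-loss SVC, dual form), while dual=False maps to 2
# (the primal form).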
def _fit_liblinear(X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol,
random_state=None, multi_class='ovr',
loss='logistic_regression', epsilon=0.1):
"""Used by Logistic Regression (and CV) and LinearSVC.
Preprocessing is done in this function before supplying it to liblinear.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X
C : float
        Inverse of regularization strength. The lower the C, the stronger
        the penalization.
fit_intercept : bool
        Whether or not to fit the intercept, that is to add an intercept
term to the decision function.
intercept_scaling : float
LibLinear internally penalizes the intercept and this term is subject
to regularization just like the other terms of the feature vector.
        In order to avoid this, one should increase intercept_scaling,
        such that the feature vector becomes [x, intercept_scaling].
class_weight : {dict, 'balanced'}, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
penalty : str, {'l1', 'l2'}
The norm of the penalty used in regularization.
dual : bool
        Dual or primal formulation.
verbose : int
Set verbose to any positive number for verbosity.
max_iter : int
Number of iterations.
tol : float
Stopping condition.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
multi_class : str, {'ovr', 'crammer_singer'}
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from a theoretical perspective
        as it is consistent, it is seldom used in practice, rarely leads to
        better accuracy, and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
loss : str, {'logistic_regression', 'hinge', 'squared_hinge',
                 'epsilon_insensitive', 'squared_epsilon_insensitive'}
The loss function used to fit the model.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
Returns
-------
coef_ : ndarray, shape (n_features, n_features + 1)
        The coefficient vector obtained by minimizing the objective function.
intercept_ : float
The intercept term added to the vector.
n_iter_ : int
Maximum number of iterations run across all classes.
"""
# FIXME Remove case insensitivity in 0.18 ---------------------
loss_l, penalty_l = loss.lower(), penalty.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
if (not loss.islower()) and loss_l not in ('l1', 'l2'):
warnings.warn(msg % (loss, loss_l, "0.18"),
DeprecationWarning)
if not penalty.islower():
warnings.warn(msg.replace("loss", "penalty")
% (penalty, penalty_l, "0.18"),
DeprecationWarning)
# -------------------------------------------------------------
# FIXME loss_l --> loss in 0.18
if loss_l not in ['epsilon_insensitive', 'squared_epsilon_insensitive']:
enc = LabelEncoder()
y_ind = enc.fit_transform(y)
classes_ = enc.classes_
if len(classes_) < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
class_weight_ = compute_class_weight(class_weight, classes_, y)
else:
class_weight_ = np.empty(0, dtype=np.float)
y_ind = y
liblinear.set_verbosity_wrap(verbose)
rnd = check_random_state(random_state)
if verbose:
print('[LibLinear]', end='')
# LinearSVC breaks when intercept_scaling is <= 0
bias = -1.0
if fit_intercept:
if intercept_scaling <= 0:
raise ValueError("Intercept scaling is %r but needs to be greater than 0."
" To disable fitting an intercept,"
" set fit_intercept=False." % intercept_scaling)
else:
bias = intercept_scaling
libsvm.set_verbosity_wrap(verbose)
libsvm_sparse.set_verbosity_wrap(verbose)
liblinear.set_verbosity_wrap(verbose)
# LibLinear wants targets as doubles, even for classification
y_ind = np.asarray(y_ind, dtype=np.float64).ravel()
solver_type = _get_liblinear_solver_type(multi_class, penalty, loss, dual)
raw_coef_, n_iter_ = liblinear.train_wrap(
X, y_ind, sp.isspmatrix(X), solver_type, tol, bias, C,
class_weight_, max_iter, rnd.randint(np.iinfo('i').max),
epsilon)
# Regarding rnd.randint(..) in the above signature:
# seed for srand in range [0..INT_MAX); due to limitations in Numpy
# on 32-bit platforms, we can't get to the UINT_MAX limit that
# srand supports
n_iter_ = max(n_iter_)
if n_iter_ >= max_iter and verbose > 0:
warnings.warn("Liblinear failed to converge, increase "
"the number of iterations.", ConvergenceWarning)
if fit_intercept:
coef_ = raw_coef_[:, :-1]
intercept_ = intercept_scaling * raw_coef_[:, -1]
else:
coef_ = raw_coef_
intercept_ = 0.
return coef_, intercept_, n_iter_
| bsd-3-clause |
yepengxj/nmt | nmt/nmt.py | 1 | 68062 | '''
Build an attention-based neural machine translation model
'''
import theano
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import cPickle as pkl
import numpy
import copy
import os
import warnings
import sys
import time
from scipy import optimize, stats
from collections import OrderedDict
from sklearn.cross_validation import KFold
import wmt14enfr
import iwslt14zhen
import openmt15zhen
import trans_enhi
import stan
profile = False
# datasets: 'name', 'load_data: returns iterator', 'prepare_data: some preprocessing'
datasets = {'wmt14enfr': (wmt14enfr.load_data, wmt14enfr.prepare_data),
'iwslt14zhen': (iwslt14zhen.load_data, iwslt14zhen.prepare_data),
'openmt15zhen': (openmt15zhen.load_data, openmt15zhen.prepare_data),
'trans_enhi': (trans_enhi.load_data, trans_enhi.prepare_data),
'stan': (stan.load_data, stan.prepare_data),
}
def get_dataset(name):
return datasets[name][0], datasets[name][1]
# push parameters to Theano shared variables
def zipp(params, tparams):
for kk, vv in params.iteritems():
tparams[kk].set_value(vv)
# pull parameters from Theano shared variables
def unzip(zipped):
new_params = OrderedDict()
for kk, vv in zipped.iteritems():
new_params[kk] = vv.get_value()
return new_params
# get the list of parameters: Note that tparams must be OrderedDict
def itemlist(tparams):
return [vv for kk, vv in tparams.iteritems()]
# dropout
def dropout_layer(state_before, use_noise, trng):
proj = tensor.switch(use_noise,
state_before * trng.binomial(state_before.shape, p=0.5, n=1, dtype=state_before.dtype),
state_before * 0.5)
return proj
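# Illustrative usage of dropout_layer (a sketch; `use_noise` is the usual
# shared flag set to 1. during training and 0. at test time):
#
#   trng = RandomStreams(1234)
#   use_noise = theano.shared(numpy.float32(0.))
#   proj = dropout_layer(proj, use_noise, trng)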
# make prefix-appended name
def _p(pp, name):
return '%s_%s'%(pp, name)
# initialize Theano shared variables according to the initial parameters
def init_tparams(params):
tparams = OrderedDict()
for kk, pp in params.iteritems():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
# load parameters
def load_params(path, params):
pp = numpy.load(path)
for kk, vv in params.iteritems():
if kk not in pp:
warnings.warn('%s is not in the archive'%kk)
continue
params[kk] = pp[kk]
return params
# layers: 'name': ('parameter initializer', 'feedforward')
layers = {'ff': ('param_init_fflayer', 'fflayer'),
'lstm': ('param_init_lstm', 'lstm_layer'),
'lstm_cond': ('param_init_lstm_cond', 'lstm_cond_layer'),
'gru': ('param_init_gru', 'gru_layer'),
'gru_cond': ('param_init_gru_cond', 'gru_cond_layer'),
'gru_cond_simple': ('param_init_gru_cond_simple', 'gru_cond_simple_layer'),
'gru_hiero': ('param_init_gru_hiero', 'gru_hiero_layer'),
'rnn': ('param_init_rnn', 'rnn_layer'),
'rnn_cond': ('param_init_rnn_cond', 'rnn_cond_layer'),
'rnn_hiero': ('param_init_rnn_hiero', 'rnn_hiero_layer'),
}
def get_layer(name):
fns = layers[name]
return (eval(fns[0]), eval(fns[1]))
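# Example (illustrative): the `layers` registry maps a layer name to its
# (initializer, apply-function) pair, so adding a new layer type only needs
# the two functions plus one registry entry.
#   init_fn, apply_fn = get_layer('gru')
#   params = init_fn(options, params, prefix='encoder',
#                    nin=options['dim_word'], dim=options['dim'])
#   proj = apply_fn(tparams, emb, options, prefix='encoder', mask=x_mask)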
# some utilities
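# `ortho_weight` returns a random orthonormal matrix (the U factor of the SVD
# of a Gaussian matrix); `norm_weight` uses it for square shapes when
# ortho=True and falls back to scaled Gaussian noise otherwise.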
def ortho_weight(ndim):
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype('float32')
def norm_weight(nin,nout=None, scale=0.01, ortho=True):
if nout == None:
nout = nin
if nout == nin and ortho:
W = ortho_weight(nin)
else:
W = scale * numpy.random.randn(nin, nout)
return W.astype('float32')
def tanh(x):
return tensor.tanh(x)
def linear(x):
return x
def concatenate(tensor_list, axis=0):
"""
Alternative implementation of `theano.tensor.concatenate`.
This function does exactly the same thing, but contrary to Theano's own
implementation, the gradient is implemented on the GPU.
Backpropagating through `theano.tensor.concatenate` yields slowdowns
because the inverse operation (splitting) needs to be done on the CPU.
This implementation does not have that problem.
:usage:
>>> x, y = theano.tensor.matrices('x', 'y')
>>> c = concatenate([x, y], axis=1)
:parameters:
- tensor_list : list
list of Theano tensor expressions that should be concatenated.
- axis : int
the tensors will be joined along this axis.
:returns:
- out : tensor
the concatenated tensor expression.
"""
concat_size = sum(tt.shape[axis] for tt in tensor_list)
output_shape = ()
for k in range(axis):
output_shape += (tensor_list[0].shape[k],)
output_shape += (concat_size,)
for k in range(axis + 1, tensor_list[0].ndim):
output_shape += (tensor_list[0].shape[k],)
out = tensor.zeros(output_shape)
offset = 0
for tt in tensor_list:
indices = ()
for k in range(axis):
indices += (slice(None),)
indices += (slice(offset, offset + tt.shape[axis]),)
for k in range(axis + 1, tensor_list[0].ndim):
indices += (slice(None),)
out = tensor.set_subtensor(out[indices], tt)
offset += tt.shape[axis]
return out
# feedforward layer: affine transformation + point-wise nonlinearity
def param_init_fflayer(options, params, prefix='ff', nin=None, nout=None, ortho=True):
if nin == None:
nin = options['dim_proj']
if nout == None:
nout = options['dim_proj']
params[_p(prefix,'W')] = norm_weight(nin, nout, scale=0.01, ortho=ortho)
params[_p(prefix,'b')] = numpy.zeros((nout,)).astype('float32')
return params
def fflayer(tparams, state_below, options, prefix='rconv', activ='lambda x: tensor.tanh(x)', **kwargs):
return eval(activ)(tensor.dot(state_below, tparams[_p(prefix,'W')])+tparams[_p(prefix,'b')])
# RNN layer
def param_init_rnn(options, params, prefix='rnn', nin=None, dim=None):
if nin == None:
nin = options['dim_proj']
if dim == None:
dim = options['dim_proj']
Wx = norm_weight(nin, dim)
params[_p(prefix,'Wx')] = Wx
Ux = ortho_weight(dim)
params[_p(prefix,'Ux')] = Ux
params[_p(prefix,'bx')] = numpy.zeros((dim,)).astype('float32')
return params
def rnn_layer(tparams, state_below, options, prefix='rnn', mask=None, **kwargs):
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
dim = tparams[_p(prefix,'Ux')].shape[0]
if mask == None:
mask = tensor.alloc(1., state_below.shape[0], 1)
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + tparams[_p(prefix, 'bx')]
def _step(m_, xx_, h_, Ux):
preactx = tensor.dot(h_, Ux)
preactx = preactx + xx_
h = tensor.tanh(preactx)
h = m_[:,None] * h + (1. - m_)[:,None] * h_
return h#, r, u, preact, preactx
rval, updates = theano.scan(_step,
sequences=[mask, state_belowx],
outputs_info = [tensor.alloc(0., n_samples, dim)],
#None, None, None, None],
non_sequences=[tparams[_p(prefix, 'Ux')]],
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile)
rval = [rval]
return rval
# Conditional RNN layer with Attention
def param_init_rnn_cond(options, params, prefix='rnn_cond', nin=None, dim=None, dimctx=None):
if nin == None:
nin = options['dim']
if dim == None:
dim = options['dim']
if dimctx == None:
dimctx = options['dim']
params = param_init_rnn(options, params, prefix, nin=nin, dim=dim)
# context to LSTM
Wcx = norm_weight(dimctx,dim)
params[_p(prefix,'Wcx')] = Wcx
# attention: prev -> hidden
Wi_att = norm_weight(nin,dimctx)
params[_p(prefix,'Wi_att')] = Wi_att
# attention: context -> hidden
Wc_att = norm_weight(dimctx)
params[_p(prefix,'Wc_att')] = Wc_att
# attention: LSTM -> hidden
Wd_att = norm_weight(dim,dimctx)
params[_p(prefix,'Wd_att')] = Wd_att
# attention: hidden bias
b_att = numpy.zeros((dimctx,)).astype('float32')
params[_p(prefix,'b_att')] = b_att
# attention:
U_att = norm_weight(dimctx,1)
params[_p(prefix,'U_att')] = U_att
c_att = numpy.zeros((1,)).astype('float32')
params[_p(prefix, 'c_tt')] = c_att
return params
def rnn_cond_layer(tparams, state_below, options, prefix='rnn',
mask=None, context=None, one_step=False,
init_memory=None, init_state=None,
context_mask=None,
**kwargs):
assert context, 'Context must be provided'
if one_step:
assert init_state, 'previous state must be provided'
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
# mask
if mask == None:
mask = tensor.alloc(1., state_below.shape[0], 1)
dim = tparams[_p(prefix, 'Ux')].shape[0]
# initial/previous state
if init_state == None:
init_state = tensor.alloc(0., n_samples, dim)
# projected context
assert context.ndim == 3, 'Context must be 3-d: #annotation x #sample x dim'
    pctx_ = tensor.dot(context, tparams[_p(prefix,'Wc_att')]) + tparams[_p(prefix,'b_att')]
# projected x
    state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + tparams[_p(prefix, 'bx')]
state_belowc = tensor.dot(state_below, tparams[_p(prefix, 'Wi_att')])
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
def _step(m_, xx_, xc_, h_, ctx_, alpha_, pctx_,
Wd_att, U_att, c_tt, Ux, Wcx):
# attention
pstate_ = tensor.dot(h_, Wd_att)
pctx__ = pctx_ + pstate_[None,:,:]
pctx__ += xc_
pctx__ = tensor.tanh(pctx__)
alpha = tensor.dot(pctx__, U_att)+c_tt
alpha = alpha.reshape([alpha.shape[0], alpha.shape[1]])
alpha = tensor.exp(alpha)
if context_mask:
alpha = alpha * context_mask
alpha = alpha / alpha.sum(0, keepdims=True)
ctx_ = (context * alpha[:,:,None]).sum(0) # current context
preactx = tensor.dot(h_, Ux)
preactx += xx_
preactx += tensor.dot(ctx_, Wcx)
h = tensor.tanh(preactx)
h = m_[:,None] * h + (1. - m_)[:,None] * h_
return h, ctx_, alpha.T #, pstate_, preact, preactx, r, u
if one_step:
rval = _step(mask, state_belowx, state_belowc, init_state, None, None,
pctx_, tparams[_p(prefix,'Wd_att')],
tparams[_p(prefix,'U_att')],
tparams[_p(prefix, 'c_tt')],
tparams[_p(prefix, 'Ux')],
tparams[_p(prefix, 'Wcx')] )
else:
rval, updates = theano.scan(_step,
sequences=[mask, state_belowx, state_belowc],
outputs_info = [init_state,
tensor.alloc(0., n_samples, context.shape[2]),
tensor.alloc(0., n_samples, context.shape[0])],
#None, None, None,
#None, None],
non_sequences=[pctx_,
tparams[_p(prefix,'Wd_att')],
tparams[_p(prefix,'U_att')],
tparams[_p(prefix, 'c_tt')],
tparams[_p(prefix, 'Ux')],
tparams[_p(prefix, 'Wcx')]
],
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile)
return rval
# Hierarchical RNN layer
def param_init_rnn_hiero(options, params, prefix='rnn_hiero', nin=None, dimctx=None):
if nin == None:
nin = options['dim']
if dimctx == None:
dimctx = options['dim']
dim = dimctx
params = param_init_rnn(options, params, prefix, nin=nin, dim=dim)
# attention: context -> hidden
Wc_att = norm_weight(dimctx)
params[_p(prefix,'Wc_att')] = Wc_att
# attention: LSTM -> hidden
Wd_att = norm_weight(dim,dimctx)
params[_p(prefix,'Wd_att')] = Wd_att
# attention: hidden bias
b_att = numpy.zeros((dimctx,)).astype('float32')
params[_p(prefix,'b_att')] = b_att
# attention:
U_att = norm_weight(dimctx,1)
params[_p(prefix,'U_att')] = U_att
c_att = numpy.zeros((1,)).astype('float32')
params[_p(prefix, 'c_tt')] = c_att
# stop probability:
W_st = norm_weight(dim, 1)
params[_p(prefix,'W_st')] = W_st
b_st = numpy.zeros((1,)).astype('float32')
params[_p(prefix,'b_st')] = b_st
return params
def rnn_hiero_layer(tparams, context, options, prefix='rnn_hiero',
context_mask=None, **kwargs):
nsteps = context.shape[0]
if context.ndim == 3:
n_samples = context.shape[1]
else:
n_samples = 1
# mask
if context_mask == None:
mask = tensor.alloc(1., context.shape[0], 1)
else:
mask = context_mask
dim = tparams[_p(prefix, 'Ux')].shape[0]
# initial/previous state
init_state = tensor.alloc(0., n_samples, dim)
# projected context
assert context.ndim == 3, 'Context must be 3-d: #annotation x #sample x dim'
pctx_ = tensor.dot(context, tparams[_p(prefix,'Wc_att')]) + tparams[_p(prefix,'b_att')]
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
def _step(m_, h_, ctx_, alpha_, v_, pctx_,
Wd_att, U_att, c_tt, Ux, Wx, bx, W_st, b_st):
# attention
pstate_ = tensor.dot(h_, Wd_att)
pctx__ = pctx_ + pstate_[None,:,:]
pctx__ = tensor.tanh(pctx__)
alpha = tensor.dot(pctx__, U_att)+c_tt
alpha = alpha.reshape([alpha.shape[0], alpha.shape[1]])
alpha = tensor.exp(alpha)
if context_mask:
alpha = alpha * context_mask
alpha = alpha / alpha.sum(0, keepdims=True)
ctx_ = (context * alpha[:,:,None]).sum(0) # current context
preactx = tensor.dot(h_, Ux)
preactx += tensor.dot(ctx_, Wx)
preactx += bx
h = tensor.tanh(preactx)
h = m_[:,None] * h + (1. - m_)[:,None] * h_
# compute stopping probability
ss = tensor.nnet.sigmoid(tensor.dot(h, W_st) + b_st)
v_ = v_ * (1. - ss)[:,0][:,None]
return h, ctx_, alpha.T, v_[:,0] #, pstate_, preact, preactx, r, u
rval, updates = theano.scan(_step,
sequences=[mask],
outputs_info = [init_state,
tensor.alloc(0., n_samples, context.shape[2]),
tensor.alloc(0., n_samples, context.shape[0]),
tensor.alloc(1., n_samples)],
#None, None, None,
#None, None],
non_sequences=[pctx_,
tparams[_p(prefix,'Wd_att')],
tparams[_p(prefix,'U_att')],
tparams[_p(prefix, 'c_tt')],
tparams[_p(prefix, 'Ux')],
tparams[_p(prefix, 'Wx')],
tparams[_p(prefix, 'bx')],
tparams[_p(prefix, 'W_st')],
tparams[_p(prefix, 'b_st')]],
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile)
rval[0] = rval[0] * rval[3][:,:,None]
return rval
# GRU layer
def param_init_gru(options, params, prefix='gru', nin=None, dim=None, hiero=False):
if nin == None:
nin = options['dim_proj']
if dim == None:
dim = options['dim_proj']
if not hiero:
W = numpy.concatenate([norm_weight(nin,dim),
norm_weight(nin,dim)], axis=1)
params[_p(prefix,'W')] = W
params[_p(prefix,'b')] = numpy.zeros((2 * dim,)).astype('float32')
U = numpy.concatenate([ortho_weight(dim),
ortho_weight(dim)], axis=1)
params[_p(prefix,'U')] = U
Wx = norm_weight(nin, dim)
params[_p(prefix,'Wx')] = Wx
Ux = ortho_weight(dim)
params[_p(prefix,'Ux')] = Ux
params[_p(prefix,'bx')] = numpy.zeros((dim,)).astype('float32')
return params
def gru_layer(tparams, state_below, options, prefix='gru', mask=None, **kwargs):
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
dim = tparams[_p(prefix,'Ux')].shape[1]
if mask == None:
mask = tensor.alloc(1., state_below.shape[0], 1)
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + tparams[_p(prefix, 'bx')]
U = tparams[_p(prefix, 'U')]
Ux = tparams[_p(prefix, 'Ux')]
def _step_slice(m_, x_, xx_, h_, U, Ux):
preact = tensor.dot(h_, U)
preact += x_
r = tensor.nnet.sigmoid(_slice(preact, 0, dim))
u = tensor.nnet.sigmoid(_slice(preact, 1, dim))
preactx = tensor.dot(h_, Ux)
preactx = preactx * r
preactx = preactx + xx_
h = tensor.tanh(preactx)
h = u * h_ + (1. - u) * h
h = m_[:,None] * h + (1. - m_)[:,None] * h_
return h#, r, u, preact, preactx
seqs = [mask, state_below_, state_belowx]
_step = _step_slice
rval, updates = theano.scan(_step,
sequences=seqs,
outputs_info = [tensor.alloc(0., n_samples, dim)],
#None, None, None, None],
non_sequences = [tparams[_p(prefix, 'U')],
tparams[_p(prefix, 'Ux')]],
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile,
strict=True)
rval = [rval]
return rval
# Conditional GRU layer without Attention
def param_init_gru_cond_simple(options, params, prefix='gru_cond', nin=None, dim=None, dimctx=None):
if nin == None:
nin = options['dim']
if dim == None:
dim = options['dim']
if dimctx == None:
dimctx = options['dim']
params = param_init_gru(options, params, prefix, nin=nin, dim=dim)
# context to LSTM
Wc = norm_weight(dimctx,dim*2)
params[_p(prefix,'Wc')] = Wc
Wcx = norm_weight(dimctx,dim)
params[_p(prefix,'Wcx')] = Wcx
return params
def gru_cond_simple_layer(tparams, state_below, options, prefix='gru',
mask=None, context=None, one_step=False,
init_memory=None, init_state=None,
context_mask=None,
**kwargs):
assert context, 'Context must be provided'
if one_step:
assert init_state, 'previous state must be provided'
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
# mask
if mask == None:
mask = tensor.alloc(1., state_below.shape[0], 1)
dim = tparams[_p(prefix, 'Ux')].shape[1]
# initial/previous state
if init_state == None:
init_state = tensor.alloc(0., n_samples, dim)
# projected context
assert context.ndim == 2, 'Context must be 2-d: #sample x dim'
pctx_ = tensor.dot(context, tparams[_p(prefix,'Wc')])
pctxx_ = tensor.dot(context, tparams[_p(prefix,'Wcx')])
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
# projected x
state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + tparams[_p(prefix, 'bx')]
state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
def _step_slice(m_, x_, xx_, h_, pctx_, pctxx_, U, Ux):
preact = tensor.dot(h_, U)
preact += x_
preact += pctx_
preact = tensor.nnet.sigmoid(preact)
r = _slice(preact, 0, dim)
u = _slice(preact, 1, dim)
preactx = tensor.dot(h_, Ux)
preactx *= r
preactx += xx_
preactx += pctxx_
h = tensor.tanh(preactx)
h = u * h_ + (1. - u) * h
h = m_[:,None] * h + (1. - m_)[:,None] * h_
return h
seqs = [mask, state_below_, state_belowx]
_step = _step_slice
shared_vars = [tparams[_p(prefix, 'U')],
tparams[_p(prefix, 'Ux')]]
if one_step:
rval = _step(*(seqs+[init_state, pctx_, pctxx_]+shared_vars))
else:
rval, updates = theano.scan(_step,
sequences=seqs,
outputs_info=[init_state],
non_sequences=[pctx_,
pctxx_]+shared_vars,
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile,
strict=True)
return rval
# Conditional GRU layer with Attention
def param_init_gru_cond(options, params, prefix='gru_cond', nin=None, dim=None, dimctx=None):
if nin == None:
nin = options['dim']
if dim == None:
dim = options['dim']
if dimctx == None:
dimctx = options['dim']
params = param_init_gru(options, params, prefix, nin=nin, dim=dim)
# context to LSTM
Wc = norm_weight(dimctx,dim*2)
params[_p(prefix,'Wc')] = Wc
Wcx = norm_weight(dimctx,dim)
params[_p(prefix,'Wcx')] = Wcx
# attention: prev -> hidden
Wi_att = norm_weight(nin,dimctx)
params[_p(prefix,'Wi_att')] = Wi_att
# attention: context -> hidden
Wc_att = norm_weight(dimctx)
params[_p(prefix,'Wc_att')] = Wc_att
# attention: LSTM -> hidden
Wd_att = norm_weight(dim,dimctx)
params[_p(prefix,'Wd_att')] = Wd_att
# attention: hidden bias
b_att = numpy.zeros((dimctx,)).astype('float32')
params[_p(prefix,'b_att')] = b_att
# attention:
U_att = norm_weight(dimctx,1)
params[_p(prefix,'U_att')] = U_att
c_att = numpy.zeros((1,)).astype('float32')
params[_p(prefix, 'c_tt')] = c_att
return params
def gru_cond_layer(tparams, state_below, options, prefix='gru',
mask=None, context=None, one_step=False,
init_memory=None, init_state=None,
context_mask=None,
**kwargs):
assert context, 'Context must be provided'
if one_step:
assert init_state, 'previous state must be provided'
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
# mask
if mask == None:
mask = tensor.alloc(1., state_below.shape[0], 1)
dim = tparams[_p(prefix, 'Wcx')].shape[1]
# initial/previous state
if init_state == None:
init_state = tensor.alloc(0., n_samples, dim)
# projected context
assert context.ndim == 3, 'Context must be 3-d: #annotation x #sample x dim'
pctx_ = tensor.dot(context, tparams[_p(prefix,'Wc_att')]) + tparams[_p(prefix,'b_att')]
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
# projected x
state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + tparams[_p(prefix, 'bx')]
state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
state_belowc = tensor.dot(state_below, tparams[_p(prefix, 'Wi_att')])
def _step_slice(m_, x_, xx_, xc_, h_, ctx_, alpha_, pctx_, cc_,
U, Wc, Wd_att, U_att, c_tt, Ux, Wcx):
# attention
pstate_ = tensor.dot(h_, Wd_att)
pctx__ = pctx_ + pstate_[None,:,:]
pctx__ += xc_
pctx__ = tensor.tanh(pctx__)
alpha = tensor.dot(pctx__, U_att)+c_tt
alpha = alpha.reshape([alpha.shape[0], alpha.shape[1]])
alpha = tensor.exp(alpha)
if context_mask:
alpha = alpha * context_mask
alpha = alpha / alpha.sum(0, keepdims=True)
ctx_ = (cc_ * alpha[:,:,None]).sum(0) # current context
preact = tensor.dot(h_, U)
preact += x_
preact += tensor.dot(ctx_, Wc)
preact = tensor.nnet.sigmoid(preact)
r = _slice(preact, 0, dim)
u = _slice(preact, 1, dim)
preactx = tensor.dot(h_, Ux)
preactx *= r
preactx += xx_
preactx += tensor.dot(ctx_, Wcx)
h = tensor.tanh(preactx)
h = u * h_ + (1. - u) * h
h = m_[:,None] * h + (1. - m_)[:,None] * h_
return h, ctx_, alpha.T #, pstate_, preact, preactx, r, u
seqs = [mask, state_below_, state_belowx, state_belowc]
_step = _step_slice
shared_vars = [tparams[_p(prefix, 'U')],
tparams[_p(prefix, 'Wc')],
tparams[_p(prefix,'Wd_att')],
tparams[_p(prefix,'U_att')],
tparams[_p(prefix, 'c_tt')],
tparams[_p(prefix, 'Ux')],
tparams[_p(prefix, 'Wcx')]]
if one_step:
rval = _step(*(seqs+[init_state, None, None, pctx_, context]+shared_vars))
else:
rval, updates = theano.scan(_step,
sequences=seqs,
outputs_info = [init_state,
tensor.alloc(0., n_samples, context.shape[2]),
tensor.alloc(0., n_samples, context.shape[0])],
#None, None, None,
#None, None],
non_sequences=[pctx_,
context]+shared_vars,
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile,
strict=True)
return rval
# Hierarchical GRU layer
def param_init_gru_hiero(options, params, prefix='gru_hiero', nin=None, dimctx=None):
if nin == None:
nin = options['dim']
if dimctx == None:
dimctx = options['dim']
dim = dimctx
params = param_init_gru(options, params, prefix, nin=nin, dim=dim, hiero=True)
# context to LSTM
Wc = norm_weight(dimctx,dim*2)
params[_p(prefix,'Wc')] = Wc
# attention: context -> hidden
Wc_att = norm_weight(dimctx)
params[_p(prefix,'Wc_att')] = Wc_att
# attention: LSTM -> hidden
Wd_att = norm_weight(dim,dimctx)
params[_p(prefix,'Wd_att')] = Wd_att
# attention: hidden bias
b_att = numpy.zeros((dimctx,)).astype('float32')
params[_p(prefix,'b_att')] = b_att
# attention:
U_att = norm_weight(dimctx,1)
params[_p(prefix,'U_att')] = U_att
c_att = numpy.zeros((1,)).astype('float32')
params[_p(prefix, 'c_tt')] = c_att
# stop probability:
W_st = norm_weight(dim, 1)
params[_p(prefix,'W_st')] = W_st
    b_st = numpy.zeros((1,)).astype('float32')
params[_p(prefix,'b_st')] = b_st
return params
def gru_hiero_layer(tparams, context, options, prefix='gru_hiero',
context_mask=None, **kwargs):
nsteps = context.shape[0]
if context.ndim == 3:
n_samples = context.shape[1]
else:
n_samples = 1
# mask
if context_mask == None:
mask = tensor.alloc(1., context.shape[0], 1)
else:
mask = context_mask
dim = tparams[_p(prefix, 'W_st')].shape[0]
# initial/previous state
init_state = tensor.alloc(0., n_samples, dim)
# projected context
assert context.ndim == 3, 'Context must be 3-d: #annotation x #sample x dim'
pctx_ = tensor.dot(context, tparams[_p(prefix,'Wc_att')]) + tparams[_p(prefix,'b_att')]
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
def _step_slice(m_, h_, ctx_, alpha_, v_, pp_, cc_,
U, Wc, Wd_att, U_att, c_tt, Ux, Wx, bx, W_st, b_st):
# attention
pstate_ = tensor.dot(h_, Wd_att)
pctx__ = pp_ + pstate_[None,:,:]
pctx__ = tensor.tanh(pctx__)
alpha = tensor.dot(pctx__, U_att)+c_tt
alpha = alpha.reshape([alpha.shape[0], alpha.shape[1]])
alpha = tensor.exp(alpha)
if context_mask:
alpha = alpha * context_mask
alpha = alpha / alpha.sum(0, keepdims=True)
ctx = (cc_ * alpha[:,:,None]).sum(0) # current context
preact = tensor.dot(h_, U)
preact += tensor.dot(ctx, Wc)
preact = tensor.nnet.sigmoid(preact)
r = _slice(preact, 0, dim)
u = _slice(preact, 1, dim)
preactx = tensor.dot(h_, Ux)
preactx = preactx * r
preactx += tensor.dot(ctx, Wx)
preactx += bx
h = tensor.tanh(preactx)
h = u * h_ + (1. - u) * h
h = m_[:,None] * h + (1. - m_)[:,None] * h_
# compute stopping probability
ss = tensor.nnet.sigmoid(tensor.dot(h, W_st) + b_st)
v_ = v_ * (1. - ss)[:,0][:,None]
return h, ctx, alpha.T, v_[:,0] #, pstate_, preact, preactx, r, u
_step = _step_slice
rval, updates = theano.scan(_step,
sequences=[mask],
outputs_info = [init_state,
tensor.alloc(0., n_samples, context.shape[2]),
tensor.alloc(0., n_samples, context.shape[0]),
tensor.alloc(1., n_samples)],
#None, None, None,
#None, None],
non_sequences=[pctx_,
context,
tparams[_p(prefix, 'U')],
tparams[_p(prefix, 'Wc')],
tparams[_p(prefix,'Wd_att')],
tparams[_p(prefix,'U_att')],
tparams[_p(prefix, 'c_tt')],
tparams[_p(prefix, 'Ux')],
tparams[_p(prefix, 'Wx')],
tparams[_p(prefix, 'bx')],
tparams[_p(prefix, 'W_st')],
tparams[_p(prefix, 'b_st')]],
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile,
strict=True)
rval[0] = rval[0] * rval[3][:,:,None]
return rval
# LSTM layer
def param_init_lstm(options, params, prefix='lstm', nin=None, dim=None, hiero=False):
if nin == None:
nin = options['dim_proj']
if dim == None:
dim = options['dim_proj']
if not hiero:
W = numpy.concatenate([norm_weight(nin,dim),
norm_weight(nin,dim),
norm_weight(nin,dim),
norm_weight(nin,dim)], axis=1)
params[_p(prefix,'W')] = W
U = numpy.concatenate([ortho_weight(dim),
ortho_weight(dim),
ortho_weight(dim),
ortho_weight(dim)], axis=1)
params[_p(prefix,'U')] = U
params[_p(prefix,'b')] = numpy.zeros((4 * dim,)).astype('float32')
return params
def lstm_layer(tparams, state_below, options, prefix='lstm', mask=None, **kwargs):
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
dim = tparams[_p(prefix,'U')].shape[0]
if mask == None:
mask = tensor.alloc(1., state_below.shape[0], 1)
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
def _step(m_, x_, h_, c_):
preact = tensor.dot(h_, tparams[_p(prefix, 'U')])
        # the bias is already folded into x_ (state_below is projected with
        # W and b before the scan), so it is not added a second time here
        preact += x_
i = tensor.nnet.sigmoid(_slice(preact, 0, dim))
f = tensor.nnet.sigmoid(_slice(preact, 1, dim))
o = tensor.nnet.sigmoid(_slice(preact, 2, dim))
c = tensor.tanh(_slice(preact, 3, dim))
c = f * c_ + i * c
c = m_[:,None] * c + (1. - m_)[:,None] * c_
h = o * tensor.tanh(c)
h = m_[:,None] * h + (1. - m_)[:,None] * h_
return h, c, i, f, o, preact
state_below = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
rval, updates = theano.scan(_step,
sequences=[mask, state_below],
outputs_info = [tensor.alloc(0., n_samples, dim),
tensor.alloc(0., n_samples, dim),
None, None, None, None],
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile)
return rval
# Conditional LSTM layer with Attention
def param_init_lstm_cond(options, params, prefix='lstm_cond', nin=None, dim=None, dimctx=None):
if nin == None:
nin = options['dim']
if dim == None:
dim = options['dim']
if dimctx == None:
dimctx = options['dim']
params = param_init_lstm(options, params, prefix, nin, dim)
# context to LSTM
Wc = norm_weight(dimctx,dim*4)
params[_p(prefix,'Wc')] = Wc
# attention: prev -> hidden
Wi_att = norm_weight(nin,dimctx)
params[_p(prefix,'Wi_att')] = Wi_att
# attention: context -> hidden
Wc_att = norm_weight(dimctx)
params[_p(prefix,'Wc_att')] = Wc_att
# attention: LSTM -> hidden
Wd_att = norm_weight(dim,dimctx)
params[_p(prefix,'Wd_att')] = Wd_att
# attention: hidden bias
b_att = numpy.zeros((dimctx,)).astype('float32')
params[_p(prefix,'b_att')] = b_att
# attention:
U_att = norm_weight(dimctx,1)
params[_p(prefix,'U_att')] = U_att
c_att = numpy.zeros((1,)).astype('float32')
params[_p(prefix, 'c_tt')] = c_att
return params
def lstm_cond_layer(tparams, state_below, options, prefix='lstm',
mask=None, context=None, one_step=False,
init_memory=None, init_state=None,
context_mask=None,
**kwargs):
assert context, 'Context must be provided'
if one_step:
assert init_memory, 'previous memory must be provided'
assert init_state, 'previous state must be provided'
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
# mask
if mask == None:
mask = tensor.alloc(1., state_below.shape[0], 1)
dim = tparams[_p(prefix, 'U')].shape[0]
# initial/previous state
if init_state == None:
init_state = tensor.alloc(0., n_samples, dim)
# initial/previous memory
if init_memory == None:
init_memory = tensor.alloc(0., n_samples, dim)
# projected context
assert context.ndim == 3, 'Context must be 3-d: #annotation x #sample x dim'
pctx_ = tensor.dot(context, tparams[_p(prefix,'Wc_att')]) + tparams[_p(prefix,'b_att')]
    # projected x: compute the attention projection from the raw input first,
    # since the next line overwrites state_below with the gate projection
    state_belowc = tensor.dot(state_below, tparams[_p(prefix, 'Wi_att')])
    state_below = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
def _step(m_, x_, xc_, h_, c_, ctx_, alpha_, pctx_):
# attention
pstate_ = tensor.dot(h_, tparams[_p(prefix,'Wd_att')])
pctx__ = pctx_ + pstate_[None,:,:]
pctx__ += xc_
pctx__ = tensor.tanh(pctx__)
alpha = tensor.dot(pctx__, tparams[_p(prefix,'U_att')])+tparams[_p(prefix, 'c_tt')]
alpha = alpha.reshape([alpha.shape[0], alpha.shape[1]])
alpha = tensor.exp(alpha)
if context_mask:
alpha = alpha * context_mask
alpha = alpha / alpha.sum(0, keepdims=True)
ctx_ = (context * alpha[:,:,None]).sum(0) # current context
preact = tensor.dot(h_, tparams[_p(prefix, 'U')])
preact += x_
preact += tensor.dot(ctx_, tparams[_p(prefix, 'Wc')])
i = tensor.nnet.sigmoid(_slice(preact, 0, dim))
f = tensor.nnet.sigmoid(_slice(preact, 1, dim))
o = tensor.nnet.sigmoid(_slice(preact, 2, dim))
c = tensor.tanh(_slice(preact, 3, dim))
c = f * c_ + i * c
c = m_[:,None] * c + (1. - m_)[:,None] * c_
h = o * tensor.tanh(c)
h = m_[:,None] * h + (1. - m_)[:,None] * h_
return h, c, ctx_, alpha.T, pstate_, preact, i, f, o
if one_step:
rval = _step(mask, state_below, state_belowc, init_state, init_memory, None, None, pctx_)
else:
rval, updates = theano.scan(_step,
sequences=[mask, state_below, state_belowc],
outputs_info = [init_state, init_memory,
tensor.alloc(0., n_samples, context.shape[2]),
tensor.alloc(0., n_samples, context.shape[0]),
None, None, None,
None, None],
non_sequences=[pctx_],
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile)
return rval
# initialize all parameters
def init_params(options):
params = OrderedDict()
# embedding
params['Wemb'] = norm_weight(options['n_words_src'], options['dim_word'])
params['Wemb_dec'] = norm_weight(options['n_words'], options['dim_word'])
# encoder: LSTM
params = get_layer(options['encoder'])[0](options, params, prefix='encoder',
nin=options['dim_word'], dim=options['dim'])
ctxdim = options['dim']
if not options['decoder'].endswith('simple'):
ctxdim = options['dim'] * 2
params = get_layer(options['encoder'])[0](options, params, prefix='encoder_r',
nin=options['dim_word'], dim=options['dim'])
if options['hiero']:
params = get_layer(options['hiero'])[0](options, params, prefix='hiero',
nin=2*options['dim'], dimctx=2*options['dim'])
# init_state, init_cell
params = get_layer('ff')[0](options, params, prefix='ff_state', nin=ctxdim, nout=options['dim'])
if options['encoder'] == 'lstm':
params = get_layer('ff')[0](options, params, prefix='ff_memory', nin=ctxdim, nout=options['dim'])
# decoder: LSTM
params = get_layer(options['decoder'])[0](options, params, prefix='decoder',
nin=options['dim_word'], dim=options['dim'],
dimctx=ctxdim)
# readout
params = get_layer('ff')[0](options, params, prefix='ff_logit_lstm', nin=options['dim'], nout=options['dim_word'], ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_prev', nin=options['dim_word'], nout=options['dim_word'], ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_ctx', nin=ctxdim, nout=options['dim_word'], ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit', nin=options['dim_word'], nout=options['n_words'])
return params
# build a training model
def build_model(tparams, options):
opt_ret = dict()
trng = RandomStreams(1234)
use_noise = theano.shared(numpy.float32(0.))
# description string: #words x #samples
x = tensor.matrix('x', dtype='int64')
x_mask = tensor.matrix('x_mask', dtype='float32')
y = tensor.matrix('y', dtype='int64')
y_mask = tensor.matrix('y_mask', dtype='float32')
xr = x[::-1]
xr_mask = x_mask[::-1]
n_timesteps = x.shape[0]
n_timesteps_trg = y.shape[0]
n_samples = x.shape[1]
emb = tparams['Wemb'][x.flatten()].reshape([n_timesteps, n_samples, options['dim_word']])
proj = get_layer(options['encoder'])[1](tparams, emb, options,
prefix='encoder',
mask=x_mask)
if options['decoder'].endswith('simple'):
ctx = proj[0][-1]
ctx_mean = ctx
else:
embr = tparams['Wemb'][xr.flatten()].reshape([n_timesteps, n_samples, options['dim_word']])
projr = get_layer(options['encoder'])[1](tparams, embr, options,
prefix='encoder_r',
mask=xr_mask)
ctx = concatenate([proj[0], projr[0][::-1]], axis=proj[0].ndim-1)
if options['hiero']:
#ctx = tensor.dot(ctx, tparams['W_hiero'])
rval = get_layer(options['hiero'])[1](tparams, ctx, options,
prefix='hiero',
context_mask=x_mask)
ctx = rval[0]
opt_ret['hiero_alphas'] = rval[2]
opt_ret['hiero_betas'] = rval[3]
# initial state/cell
ctx_mean = ctx.mean(0)
init_state = get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_state', activ='tanh')
init_memory = None
if options['encoder'] == 'lstm':
init_memory = get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_memory', activ='tanh')
# word embedding (target)
emb = tparams['Wemb_dec'][y.flatten()].reshape([n_timesteps_trg, n_samples, options['dim_word']])
emb_shifted = tensor.zeros_like(emb)
emb_shifted = tensor.set_subtensor(emb_shifted[1:], emb[:-1])
emb = emb_shifted
# decoder
proj = get_layer(options['decoder'])[1](tparams, emb, options,
prefix='decoder',
mask=y_mask, context=ctx,
context_mask=x_mask,
one_step=False,
init_state=init_state,
init_memory=init_memory)
proj_h = proj[0]
if options['decoder'].endswith('simple'):
ctxs = ctx[None,:,:]
else:
if options['decoder'].startswith('lstm'):
ctxs = proj[2]
opt_ret['dec_alphas'] = proj[3]
else:
ctxs = proj[1]
opt_ret['dec_alphas'] = proj[2]
# compute word probabilities
logit_lstm = get_layer('ff')[1](tparams, proj_h, options, prefix='ff_logit_lstm', activ='linear')
logit_prev = get_layer('ff')[1](tparams, emb, options, prefix='ff_logit_prev', activ='linear')
logit_ctx = get_layer('ff')[1](tparams, ctxs, options, prefix='ff_logit_ctx', activ='linear')
logit = tensor.tanh(logit_lstm+logit_prev+logit_ctx)
logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit', activ='linear')
logit_shp = logit.shape
probs = tensor.nnet.softmax(logit.reshape([logit_shp[0]*logit_shp[1], logit_shp[2]]))
# cost
y_flat = y.flatten()
y_flat_idx = tensor.arange(y_flat.shape[0]) * options['n_words'] + y_flat
cost = -tensor.log(probs.flatten()[y_flat_idx])
cost = cost.reshape([y.shape[0],y.shape[1]])
cost = (cost * y_mask).sum(0)
return trng, use_noise, x, x_mask, y, y_mask, opt_ret, cost
# build a sampler
def build_sampler(tparams, options, trng):
x = tensor.matrix('x', dtype='int64')
xr = x[::-1]
n_timesteps = x.shape[0]
n_samples = x.shape[1]
# word embedding (source)
emb = tparams['Wemb'][x.flatten()].reshape([n_timesteps, n_samples, options['dim_word']])
embr = tparams['Wemb'][xr.flatten()].reshape([n_timesteps, n_samples, options['dim_word']])
# encoder
proj = get_layer(options['encoder'])[1](tparams, emb, options, prefix='encoder')
if options['decoder'].endswith('simple'):
ctx = proj[0][-1]
ctx_mean = ctx
else:
projr = get_layer(options['encoder'])[1](tparams, embr, options, prefix='encoder_r')
ctx = concatenate([proj[0],projr[0][::-1]], axis=proj[0].ndim-1)
if options['hiero']:
rval = get_layer(options['hiero'])[1](tparams, ctx, options, prefix='hiero')
ctx = rval[0]
# initial state/cell
ctx_mean = ctx.mean(0)
init_state = get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_state', activ='tanh')
if options['encoder'] == 'lstm':
init_memory = get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_memory', activ='tanh')
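    # note: init_memory is only defined when the encoder is an LSTM; the
    # branches below assume the encoder and decoder are of the same family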
print 'Building f_init...',
outs = [init_state, ctx]
if options['decoder'].startswith('lstm'):
outs += [init_memory]
f_init = theano.function([x], outs, name='f_init', profile=profile)
print 'Done'
# x: 1 x 1
y = tensor.vector('y_sampler', dtype='int64')
init_state = tensor.matrix('init_state', dtype='float32')
if options['decoder'].startswith('lstm'):
init_memory = tensor.matrix('init_memory', dtype='float32')
else:
init_memory = None
# if it's the first word, emb should be all zero
emb = tensor.switch(y[:,None] < 0, tensor.alloc(0., 1, tparams['Wemb_dec'].shape[1]),
tparams['Wemb_dec'][y])
proj = get_layer(options['decoder'])[1](tparams, emb, options,
prefix='decoder',
mask=None, context=ctx,
one_step=True,
init_state=init_state,
init_memory=init_memory)
if options['decoder'].endswith('simple'):
next_state = proj
ctxs = ctx
else:
next_state = proj[0]
ctxs = proj[1]
if options['decoder'].startswith('lstm'):
next_memory = proj[1]
ctxs = proj[2]
logit_lstm = get_layer('ff')[1](tparams, next_state, options, prefix='ff_logit_lstm', activ='linear')
logit_prev = get_layer('ff')[1](tparams, emb, options, prefix='ff_logit_prev', activ='linear')
logit_ctx = get_layer('ff')[1](tparams, ctxs, options, prefix='ff_logit_ctx', activ='linear')
logit = tensor.tanh(logit_lstm+logit_prev+logit_ctx)
logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit', activ='linear')
next_probs = tensor.nnet.softmax(logit)
next_sample = trng.multinomial(pvals=next_probs).argmax(1)
# next word probability
print 'Building f_next..',
inps = [y, ctx, init_state]
outs = [next_probs, next_sample, next_state]
if options['decoder'].startswith('lstm'):
inps += [init_memory]
outs += [next_memory]
f_next = theano.function(inps, outs, name='f_next', profile=profile)
print 'Done'
return f_init, f_next
# generate sample
def gen_sample(tparams, f_init, f_next, x, options, trng=None, k=1, maxlen=30,
stochastic=True, argmax=False):
if k > 1:
assert not stochastic, 'Beam search does not support stochastic sampling'
sample = []
sample_score = []
if stochastic:
sample_score = 0
live_k = 1
dead_k = 0
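    # live_k is 1 here, so the list aliasing from [[]] * live_k is harmless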
hyp_samples = [[]] * live_k
hyp_scores = numpy.zeros(live_k).astype('float32')
hyp_states = []
if options['decoder'].startswith('lstm'):
hyp_memories = []
ret = f_init(x)
next_state, ctx0 = ret[0], ret[1]
if options['decoder'].startswith('lstm'):
next_memory = ret[2]
next_w = -1 * numpy.ones((1,)).astype('int64')
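    # -1 marks the beginning of a sentence: build_sampler maps any negative
    # index to an all-zero embedding for the first target word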
for ii in xrange(maxlen):
if options['decoder'].endswith('simple'):
ctx = numpy.tile(ctx0, [live_k, 1])
else:
ctx = numpy.tile(ctx0.reshape((ctx0.shape[0],ctx0.shape[2])),
[live_k, 1, 1]).transpose((1,0,2))
inps = [next_w, ctx, next_state]
if options['decoder'].startswith('lstm'):
inps += [next_memory]
ret = f_next(*inps)
next_p, next_w, next_state = ret[0], ret[1], ret[2]
if options['decoder'].startswith('lstm'):
next_memory = ret[3]
if stochastic:
if argmax:
nw = next_p[0].argmax()
else:
nw = next_w[0]
sample.append(nw)
sample_score += next_p[0,nw]
if nw == 0:
break
else:
cand_scores = hyp_scores[:,None] - numpy.log(next_p)
cand_flat = cand_scores.flatten()
ranks_flat = cand_flat.argsort()[:(k-dead_k)]
voc_size = next_p.shape[1]
            trans_indices = ranks_flat // voc_size
word_indices = ranks_flat % voc_size
costs = cand_flat[ranks_flat]
new_hyp_samples = []
new_hyp_scores = numpy.zeros(k-dead_k).astype('float32')
new_hyp_states = []
if options['decoder'].startswith('lstm'):
new_hyp_memories = []
for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
new_hyp_samples.append(hyp_samples[ti]+[wi])
new_hyp_scores[idx] = copy.copy(costs[ti])
new_hyp_states.append(copy.copy(next_state[ti]))
if options['decoder'].startswith('lstm'):
new_hyp_memories.append(copy.copy(next_memory[ti]))
# check the finished samples
new_live_k = 0
hyp_samples = []
hyp_scores = []
hyp_states = []
if options['decoder'].startswith('lstm'):
hyp_memories = []
for idx in xrange(len(new_hyp_samples)):
if new_hyp_samples[idx][-1] == 0:
sample.append(new_hyp_samples[idx])
sample_score.append(new_hyp_scores[idx])
dead_k += 1
else:
new_live_k += 1
hyp_samples.append(new_hyp_samples[idx])
hyp_scores.append(new_hyp_scores[idx])
hyp_states.append(new_hyp_states[idx])
if options['decoder'].startswith('lstm'):
hyp_memories.append(new_hyp_memories[idx])
hyp_scores = numpy.array(hyp_scores)
live_k = new_live_k
if new_live_k < 1:
break
if dead_k >= k:
break
next_w = numpy.array([w[-1] for w in hyp_samples])
next_state = numpy.array(hyp_states)
if options['decoder'].startswith('lstm'):
next_memory = numpy.array(hyp_memories)
if not stochastic:
# dump every remaining one
if live_k > 0:
for idx in xrange(live_k):
sample.append(hyp_samples[idx])
sample_score.append(hyp_scores[idx])
return sample, sample_score
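# Usage sketch (illustrative): beam-search decoding of one source sentence,
# where `x` is an int64 array of word indices with shape (#words, 1) and
# index 0 is end-of-sentence.
#   f_init, f_next = build_sampler(tparams, model_options, trng)
#   sample, score = gen_sample(tparams, f_init, f_next, x, model_options,
#                              trng=trng, k=5, maxlen=30,
#                              stochastic=False, argmax=False)
#   # pick the hypothesis with the best length-normalised score
#   norm = numpy.array(score) / numpy.array([len(s) for s in sample])
#   best = sample[norm.argmin()]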
def pred_probs(f_log_probs, prepare_data, options, iterator, verbose=True):
probs = []
n_done = 0
iterator.start()
for x, y in iterator:
n_done += len(x)
x, x_mask, y, y_mask = prepare_data(x, y, n_words_src=options['n_words_src'], n_words=options['n_words'])
pprobs = f_log_probs(x,x_mask,y,y_mask)
for pp in pprobs:
probs.append(pp)
if verbose:
print >>sys.stderr, '%d samples computed'%(n_done)
return numpy.array(probs)
# optimizers
# name(hyperp, tparams, grads, inputs (list), cost) = f_grad_shared, f_update
def adam(lr, tparams, grads, inp, cost):
gshared = [theano.shared(p.get_value() * 0., name='%s_grad'%k) for k, p in tparams.iteritems()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
f_grad_shared = theano.function(inp, cost, updates=gsup, profile=profile)
lr0 = 0.0002
b1 = 0.1
b2 = 0.001
e = 1e-8
updates = []
i = theano.shared(numpy.float32(0.))
i_t = i + 1.
fix1 = 1. - b1**(i_t)
fix2 = 1. - b2**(i_t)
lr_t = lr0 * (tensor.sqrt(fix2) / fix1)
for p, g in zip(tparams.values(), gshared):
m = theano.shared(p.get_value() * 0.)
v = theano.shared(p.get_value() * 0.)
m_t = (b1 * g) + ((1. - b1) * m)
v_t = (b2 * tensor.sqr(g)) + ((1. - b2) * v)
g_t = m_t / (tensor.sqrt(v_t) + e)
p_t = p - (lr_t * g_t)
updates.append((m, m_t))
updates.append((v, v_t))
updates.append((p, p_t))
updates.append((i, i_t))
f_update = theano.function([lr], [], updates=updates, on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
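# Note on notation: b1 and b2 above play the role of (1 - beta1) and
# (1 - beta2) in the Adam paper, i.e. b1 = 0.1 corresponds to beta1 = 0.9.
# A minimal NumPy sketch mirroring the per-parameter update above
# (illustrative only):
#   t += 1
#   lr_t = lr0 * numpy.sqrt(1. - b2**t) / (1. - b1**t)
#   m = b1 * g + (1. - b1) * m
#   v = b2 * g**2 + (1. - b2) * v
#   p -= lr_t * m / (numpy.sqrt(v) + e)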
def adadelta(lr, tparams, grads, inp, cost):
zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_grad'%k) for k, p in tparams.iteritems()]
running_up2 = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rup2'%k) for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad2'%k) for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2)) for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function(inp, cost, updates=zgup+rg2up, profile=profile)
updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg for zg, ru2, rg2 in zip(zipped_grads, running_up2, running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2)) for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(itemlist(tparams), updir)]
f_update = theano.function([lr], [], updates=ru2up+param_up, on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
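# Adadelta (Zeiler, 2012) in brief: keep decaying averages of squared
# gradients and squared updates, and scale each step by their ratio, so no
# learning rate is needed (`lr` is accepted but unused):
#   E[g^2]  <- 0.95 * E[g^2]  + 0.05 * g^2
#   dx       = -sqrt(E[dx^2] + 1e-6) / sqrt(E[g^2] + 1e-6) * g
#   E[dx^2] <- 0.95 * E[dx^2] + 0.05 * dx^2
#   x       <- x + dx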
def rmsprop(lr, tparams, grads, inp, cost):
zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_grad'%k) for k, p in tparams.iteritems()]
running_grads = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad'%k) for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad2'%k) for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2)) for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function(inp, cost, updates=zgup+rgup+rg2up, profile=profile)
updir = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_updir'%k) for k, p in tparams.iteritems()]
updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4)) for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads, running_grads2)]
param_up = [(p, p + udn[1]) for p, udn in zip(itemlist(tparams), updir_new)]
f_update = theano.function([lr], [], updates=updir_new+param_up, on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
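# This appears to be the rmsprop-with-momentum variant from Graves (2013),
# "Generating Sequences With Recurrent Neural Networks": the gradient is
# normalised by a centred second moment, sqrt(E[g^2] - E[g]^2 + 1e-4), and
# the resulting step is smoothed with momentum 0.9 and base step size 1e-4.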
def sgd(lr, tparams, grads, x, mask, y, cost):
gshared = [theano.shared(p.get_value() * 0., name='%s_grad'%k) for k, p in tparams.iteritems()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
f_grad_shared = theano.function([x, mask, y], cost, updates=gsup, profile=profile)
pup = [(p, p - lr * g) for p, g in zip(itemlist(tparams), gshared)]
f_update = theano.function([lr], [], updates=pup, profile=profile)
return f_grad_shared, f_update
def train(dim_word=100, # word vector dimensionality
dim=1000, # the number of LSTM units
encoder='gru',
decoder='gru_cond',
hiero=None, #'gru_hiero', # or None
patience=10,
max_epochs=5000,
dispFreq=100,
decay_c=0.,
alpha_c=0.,
diag_c=0.,
lrate=0.01,
n_words_src=100000,
n_words=100000,
maxlen=100, # maximum length of the description
optimizer='rmsprop',
batch_size = 16,
valid_batch_size = 16,
saveto='model.npz',
validFreq=1000,
saveFreq=1000, # save the parameters after every saveFreq updates
sampleFreq=100, # generate some samples after every sampleFreq updates
dataset='wmt14enfr',
dictionary=None, # word dictionary
dictionary_src=None, # word dictionary
use_dropout=False,
reload_=False):
# Model options
model_options = locals().copy()
if dictionary:
with open(dictionary, 'rb') as f:
word_dict = pkl.load(f)
word_idict = dict()
for kk, vv in word_dict.iteritems():
word_idict[vv] = kk
if dictionary_src:
with open(dictionary_src, 'rb') as f:
word_dict_src = pkl.load(f)
word_idict_src = dict()
for kk, vv in word_dict_src.iteritems():
word_idict_src[vv] = kk
# reload options
if reload_ and os.path.exists(saveto):
with open('%s.pkl'%saveto, 'rb') as f:
            model_options = pkl.load(f)
print 'Loading data'
load_data, prepare_data = get_dataset(dataset)
train, valid, test = load_data(batch_size=batch_size)
print 'Building model'
params = init_params(model_options)
# reload parameters
if reload_ and os.path.exists(saveto):
params = load_params(saveto, params)
tparams = init_tparams(params)
trng, use_noise, \
x, x_mask, y, y_mask, \
opt_ret, \
cost = \
build_model(tparams, model_options)
inps = [x, x_mask, y, y_mask]
theano.printing.debugprint(cost.mean(), file=open('cost.txt', 'w'))
    print 'Building sampler'
f_init, f_next = build_sampler(tparams, model_options, trng)
# before any regularizer
print 'Building f_log_probs...',
f_log_probs = theano.function(inps, cost, profile=profile)
print 'Done'
cost = cost.mean()
if decay_c > 0.:
decay_c = theano.shared(numpy.float32(decay_c), name='decay_c')
weight_decay = 0.
for kk, vv in tparams.iteritems():
weight_decay += (vv ** 2).sum()
weight_decay *= decay_c
cost += weight_decay
if alpha_c > 0. and not model_options['decoder'].endswith('simple'):
alpha_c = theano.shared(numpy.float32(alpha_c), name='alpha_c')
alpha_reg = alpha_c * ((tensor.cast(y_mask.sum(0)//x_mask.sum(0), 'float32')[:,None]-
opt_ret['dec_alphas'].sum(0))**2).sum(1).mean()
cost += alpha_reg
# after any regularizer
print 'Building f_cost...',
f_cost = theano.function(inps, cost, profile=profile)
print 'Done'
if model_options['hiero'] != None:
print 'Building f_beta...',
f_beta = theano.function([x, x_mask], opt_ret['hiero_betas'], profile=profile)
print 'Done'
print 'Computing gradient...',
grads = tensor.grad(cost, wrt=itemlist(tparams))
print 'Done'
print 'Building f_grad...',
f_grad = theano.function(inps, grads, profile=profile)
print 'Done'
lr = tensor.scalar(name='lr')
print 'Building optimizers...',
f_grad_shared, f_update = eval(optimizer)(lr, tparams, grads, inps, cost)
print 'Done'
print 'Optimization'
history_errs = []
# reload history
if reload_ and os.path.exists(saveto):
history_errs = list(numpy.load(saveto)['history_errs'])
best_p = None
    bad_counter = 0
if validFreq == -1:
validFreq = len(train[0])/batch_size
if saveFreq == -1:
saveFreq = len(train[0])/batch_size
if sampleFreq == -1:
sampleFreq = len(train[0])/batch_size
uidx = 0
estop = False
for eidx in xrange(max_epochs):
n_samples = 0
train.start()
for x, y in train:
n_samples += len(x)
uidx += 1
use_noise.set_value(1.)
x, x_mask, y, y_mask = prepare_data(x, y, maxlen=maxlen,
n_words_src=n_words_src, n_words=n_words)
            if x is None:
print 'Minibatch with zero sample under length ', maxlen
uidx -= 1
continue
ud_start = time.time()
cost = f_grad_shared(x, x_mask, y, y_mask)
f_update(lrate)
ud = time.time() - ud_start
if numpy.isnan(cost) or numpy.isinf(cost):
print 'NaN detected'
return 1., 1., 1.
if numpy.mod(uidx, dispFreq) == 0:
print 'Epoch ', eidx, 'Update ', uidx, 'Cost ', cost, 'UD ', ud
if numpy.mod(uidx, saveFreq) == 0:
print 'Saving...',
#import ipdb; ipdb.set_trace()
if best_p != None:
params = best_p
else:
params = unzip(tparams)
numpy.savez(saveto, history_errs=history_errs, **params)
pkl.dump(model_options, open('%s.pkl'%saveto, 'wb'))
print 'Done'
if numpy.mod(uidx, sampleFreq) == 0:
# FIXME: random selection?
for jj in xrange(numpy.minimum(5,x.shape[1])):
stochastic = False
sample, score = gen_sample(tparams, f_init, f_next, x[:,jj][:,None],
model_options, trng=trng, k=1, maxlen=30,
stochastic=stochastic, argmax=True)
print 'Source ',jj,': ',
for vv in x[:,jj]:
if vv == 0:
break
if vv in word_idict_src:
print word_idict_src[vv],
else:
print 'UNK',
print
print 'Truth ',jj,' : ',
for vv in y[:,jj]:
if vv == 0:
break
if vv in word_idict:
print word_idict[vv],
else:
print 'UNK',
print
if model_options['hiero']:
betas = f_beta(x[:,jj][:,None], x_mask[:,jj][:,None])
print 'Validity ', jj,': ',
for vv,bb in zip(y[:,jj],betas[:,0]):
if vv == 0:
break
print bb,
print
print 'Sample ', jj, ': ',
if stochastic:
ss = sample
else:
score = score / numpy.array([len(s) for s in sample])
ss = sample[score.argmin()]
for vv in ss:
if vv == 0:
break
if vv in word_idict:
print word_idict[vv],
else:
print 'UNK',
print
if numpy.mod(uidx, validFreq) == 0:
use_noise.set_value(0.)
train_err = 0
valid_err = 0
test_err = 0
#for _, tindex in kf:
# x, mask = prepare_data(train[0][train_index])
# train_err += (f_pred(x, mask) == train[1][tindex]).sum()
#train_err = 1. - numpy.float32(train_err) / train[0].shape[0]
#train_err = pred_error(f_pred, prepare_data, train, kf)
if valid != None:
valid_err = pred_probs(f_log_probs, prepare_data, model_options, valid).mean()
if test != None:
test_err = pred_probs(f_log_probs, prepare_data, model_options, test).mean()
history_errs.append([valid_err, test_err])
if uidx == 0 or valid_err <= numpy.array(history_errs)[:,0].min():
best_p = unzip(tparams)
bad_counter = 0
if len(history_errs) > patience and valid_err >= numpy.array(history_errs)[:-patience,0].min():
bad_counter += 1
if bad_counter > patience:
print 'Early Stop!'
estop = True
break
print 'Train ', train_err, 'Valid ', valid_err, 'Test ', test_err
#print 'Epoch ', eidx, 'Update ', uidx, 'Train ', train_err, 'Valid ', valid_err, 'Test ', test_err
print 'Seen %d samples'%n_samples
if estop:
break
if best_p is not None:
zipp(best_p, tparams)
use_noise.set_value(0.)
train_err = 0
valid_err = 0
test_err = 0
#train_err = pred_error(f_pred, prepare_data, train, kf)
if valid != None:
valid_err = pred_probs(f_log_probs, prepare_data, model_options, valid).mean()
if test != None:
test_err = pred_probs(f_log_probs, prepare_data, model_options, test).mean()
print 'Train ', train_err, 'Valid ', valid_err, 'Test ', test_err
if best_p != None:
params = copy.copy(best_p)
else:
params = unzip(tparams)
numpy.savez(saveto, zipped_params=best_p, train_err=train_err,
valid_err=valid_err, test_err=test_err, history_errs=history_errs,
**params)
return train_err, valid_err, test_err
if __name__ == '__main__':
pass
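    # Example invocation (illustrative; dataset names and dictionary paths
    # depend on the local setup and the dataset modules imported above):
    #   train(dim_word=100, dim=1000, encoder='gru', decoder='gru_cond',
    #         dataset='stan', dictionary='dict_trg.pkl',
    #         dictionary_src='dict_src.pkl', optimizer='adadelta',
    #         batch_size=16, saveto='model.npz')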
| bsd-3-clause |
rahuldhote/scikit-learn | sklearn/neighbors/graph.py | 207 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
    include_self : bool, default None
        Whether or not to mark each sample as the first nearest neighbor to
        itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance', as this preserves backward compatibility. From
        version 0.18, the default value will be False, irrespective of the
        value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params : dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
    Neighborhoods are restricted to the points at a distance lower than the
    radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the param equal to 2.)
    include_self : bool, default None
        Whether or not to mark each sample as the first nearest neighbor to
        itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance', as this preserves backward compatibility. From
        version 0.18, the default value will be False, irrespective of the
        value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params : dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
hyperspy/hyperspy | hyperspy/_signals/eds.py | 2 | 47066 | # -*- coding: utf-8 -*-
# Copyright 2007-2022 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <https://www.gnu.org/licenses/#GPL>.
import itertools
import logging
import numpy as np
import warnings
from collections.abc import Iterable
from matplotlib import pyplot as plt
from hyperspy import utils
from hyperspy.signal import BaseSignal
from hyperspy._signals.signal1d import Signal1D, LazySignal1D
from hyperspy.misc.elements import elements as elements_db
from hyperspy.misc.eds import utils as utils_eds
from hyperspy.misc.utils import isiterable
from hyperspy.utils.plot import markers
from hyperspy.docstrings.plot import (BASE_PLOT_DOCSTRING_PARAMETERS,
PLOT1D_DOCSTRING)
_logger = logging.getLogger(__name__)
class EDSSpectrum(Signal1D):
"""General 1D signal class for EDS spectra."""
_signal_type = "EDS"
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
if self.metadata.Signal.signal_type == 'EDS':
warnings.warn('The microscope type is not set. Use '
'set_signal_type(\'EDS_TEM\') '
'or set_signal_type(\'EDS_SEM\')')
self.axes_manager.signal_axes[0].is_binned = True
self._xray_markers = {}
def _get_line_energy(self, Xray_line, FWHM_MnKa=None):
"""
        Get the line energy and the energy resolution of an X-ray line.
        The return values are in the same units as the signal axis.
Parameters
----------
        Xray_line : str
            A valid element X-ray line, e.g. Fe_Kb.
        FWHM_MnKa : {None, float, 'auto'}
            The energy resolution of the detector in eV.
            If 'auto', use the value stored in
            'self.metadata.Acquisition_instrument.SEM.Detector.EDS.energy_resolution_MnKa'.
Returns
-------
float: the line energy, if FWHM_MnKa is None
(float,float): the line energy and the energy resolution, if FWHM_MnKa
is not None
"""
units_name = self.axes_manager.signal_axes[0].units
if FWHM_MnKa == 'auto':
if self.metadata.Signal.signal_type == "EDS_SEM":
FWHM_MnKa = self.metadata.Acquisition_instrument.SEM.\
Detector.EDS.energy_resolution_MnKa
elif self.metadata.Signal.signal_type == "EDS_TEM":
FWHM_MnKa = self.metadata.Acquisition_instrument.TEM.\
Detector.EDS.energy_resolution_MnKa
else:
raise NotImplementedError(
"This method only works for EDS_TEM or EDS_SEM signals. "
"You can use `set_signal_type('EDS_TEM')` or"
"`set_signal_type('EDS_SEM')` to convert to one of these"
"signal types.")
line_energy = utils_eds._get_energy_xray_line(Xray_line)
if units_name == 'eV':
line_energy *= 1000
if FWHM_MnKa is not None:
line_FWHM = utils_eds.get_FWHM_at_Energy(
FWHM_MnKa, line_energy / 1000) * 1000
elif units_name == 'keV':
if FWHM_MnKa is not None:
line_FWHM = utils_eds.get_FWHM_at_Energy(FWHM_MnKa,
line_energy)
else:
raise ValueError(
f"{units_name} is not a valid units for the energy axis. "
"Only `eV` and `keV` are supported. "
"If `s` is the variable containing this EDS spectrum:\n "
">>> s.axes_manager.signal_axes[0].units = 'keV' \n")
if FWHM_MnKa is None:
return line_energy
else:
return line_energy, line_FWHM
def _get_beam_energy(self):
"""
Get the beam energy.
        The return value is in the same units as the signal axis.
"""
if "Acquisition_instrument.SEM.beam_energy" in self.metadata:
beam_energy = self.metadata.Acquisition_instrument.SEM.beam_energy
elif "Acquisition_instrument.TEM.beam_energy" in self.metadata:
beam_energy = self.metadata.Acquisition_instrument.TEM.beam_energy
else:
raise AttributeError(
"The beam energy is not defined in `metadata`. "
"Use `set_microscope_parameters` to set it.")
units_name = self.axes_manager.signal_axes[0].units
if units_name == 'eV':
beam_energy *= 1000
return beam_energy
def _get_xray_lines_in_spectral_range(self, xray_lines):
"""
Return the lines in the energy range
Parameters
----------
        xray_lines : list of str
            The X-ray lines to filter.
        Returns
        -------
The list of xray_lines in the energy range
"""
ax = self.axes_manager.signal_axes[0]
low_value = ax.low_value
high_value = ax.high_value
try:
if self._get_beam_energy() < high_value:
high_value = self._get_beam_energy()
except AttributeError:
# in case the beam energy is not defined in the metadata
pass
xray_lines_in_range = []
xray_lines_not_in_range = []
for xray_line in xray_lines:
line_energy = self._get_line_energy(xray_line)
if low_value < line_energy < high_value:
xray_lines_in_range.append(xray_line)
else:
xray_lines_not_in_range.append(xray_line)
return xray_lines_in_range, xray_lines_not_in_range
def sum(self, axis=None, out=None):
if axis is None:
axis = self.axes_manager.navigation_axes
s = super().sum(axis=axis, out=out)
s = out or s
# Update live time by the change in navigation axes dimensions
time_factor = (
np.prod([ax.size for ax in self.axes_manager.navigation_axes])
/ np.prod([ax.size for ax in s.axes_manager.navigation_axes])
)
aimd = s.metadata.get_item('Acquisition_instrument', None)
if aimd is not None:
aimd = s.metadata.Acquisition_instrument
if "SEM.Detector.EDS.live_time" in aimd:
aimd.SEM.Detector.EDS.live_time *= time_factor
elif "TEM.Detector.EDS.live_time" in aimd:
aimd.TEM.Detector.EDS.live_time *= time_factor
else:
_logger.info("Live_time could not be found in the metadata and "
"has not been updated.")
if out is None:
return s
sum.__doc__ = Signal1D.sum.__doc__
def rebin(self, new_shape=None, scale=None, crop=True, dtype=None,
out=None):
factors = self._validate_rebin_args_and_get_factors(
new_shape=new_shape,
scale=scale,)
m = super().rebin(new_shape=new_shape, scale=scale, crop=crop,
dtype=dtype, out=out)
m = out or m
time_factor = np.prod([factors[axis.index_in_array]
for axis in m.axes_manager.navigation_axes])
aimd = m.metadata.Acquisition_instrument
if "Acquisition_instrument.SEM.Detector.EDS.real_time" in m.metadata:
aimd.SEM.Detector.EDS.real_time *= time_factor
elif "Acquisition_instrument.TEM.Detector.EDS.real_time" in m.metadata:
aimd.TEM.Detector.EDS.real_time *= time_factor
else:
_logger.info(
"real_time could not be found in the metadata and has not been updated.")
if "Acquisition_instrument.SEM.Detector.EDS.live_time" in m.metadata:
aimd.SEM.Detector.EDS.live_time *= time_factor
elif "Acquisition_instrument.TEM.Detector.EDS.live_time" in m.metadata:
aimd.TEM.Detector.EDS.live_time *= time_factor
else:
_logger.info(
"Live_time could not be found in the metadata and has not been updated.")
if out is None:
return m
else:
out.events.data_changed.trigger(obj=out)
return m
rebin.__doc__ = BaseSignal.rebin.__doc__
def set_elements(self, elements):
"""Erase all elements and set them.
Parameters
----------
elements : list of strings
A list of chemical element symbols.
See also
--------
add_elements, set_lines, add_lines
Examples
--------
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> print(s.metadata.Sample.elements)
>>> s.set_elements(['Al'])
>>> print(s.metadata.Sample.elements)
['Al' 'C' 'Cu' 'Mn' 'Zr']
['Al']
"""
# Erase previous elements and X-ray lines
if "Sample.elements" in self.metadata:
del self.metadata.Sample.elements
self.add_elements(elements)
def add_elements(self, elements):
"""Add elements and the corresponding X-ray lines.
The list of elements is stored in `metadata.Sample.elements`
Parameters
----------
elements : list of strings
The symbol of the elements.
Examples
--------
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> print(s.metadata.Sample.elements)
>>> s.add_elements(['Ar'])
>>> print(s.metadata.Sample.elements)
['Al' 'C' 'Cu' 'Mn' 'Zr']
['Al', 'Ar', 'C', 'Cu', 'Mn', 'Zr']
See also
--------
set_elements, add_lines, set_lines
"""
if not isiterable(elements) or isinstance(elements, str):
raise ValueError(
"Input must be in the form of a list. For example, "
"if `s` is the variable containing this EDS spectrum:\n "
">>> s.add_elements(('C',))\n"
"See the docstring for more information.")
if "Sample.elements" in self.metadata:
elements_ = set(self.metadata.Sample.elements)
else:
elements_ = set()
for element in elements:
if element in elements_db:
elements_.add(element)
else:
raise ValueError(
f"{element} is not a valid chemical element symbol.")
self.metadata.set_item('Sample.elements', sorted(list(elements_)))
def _get_xray_lines(self, xray_lines=None, only_one=None,
only_lines=('a',)):
if xray_lines is None:
if 'Sample.xray_lines' in self.metadata:
xray_lines = self.metadata.Sample.xray_lines
elif 'Sample.elements' in self.metadata:
xray_lines = self._get_lines_from_elements(
self.metadata.Sample.elements,
only_one=only_one,
only_lines=only_lines)
else:
raise ValueError(
"Not X-ray line, set them with `add_elements`.")
return xray_lines
def set_lines(self,
lines,
only_one=True,
only_lines=('a',)):
"""Erase all Xrays lines and set them.
See add_lines for details.
Parameters
----------
lines : list of strings
A list of valid element X-ray lines to add e.g. Fe_Kb.
Additionally, if `metadata.Sample.elements` is
            defined, add the lines of those elements that were not
given in this list.
only_one: bool
If False, add all the lines of each element in
            `metadata.Sample.elements` that have no line
defined in lines. If True (default),
only add the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, only the given lines will be added.
Examples
--------
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.add_lines()
>>> print(s.metadata.Sample.xray_lines)
>>> s.set_lines(['Cu_Ka'])
>>> print(s.metadata.Sample.xray_lines)
['Al_Ka', 'C_Ka', 'Cu_La', 'Mn_La', 'Zr_La']
['Al_Ka', 'C_Ka', 'Cu_Ka', 'Mn_La', 'Zr_La']
See also
--------
add_lines, add_elements, set_elements
"""
only_lines = utils_eds._parse_only_lines(only_lines)
if "Sample.xray_lines" in self.metadata:
del self.metadata.Sample.xray_lines
self.add_lines(lines=lines,
only_one=only_one,
only_lines=only_lines)
def add_lines(self,
lines=(),
only_one=True,
only_lines=("a",)):
"""Add X-rays lines to the internal list.
Although most functions do not require an internal list of
X-ray lines because they can be calculated from the internal
        list of elements, occasionally it might be useful to customize the
        X-ray lines to be used by all functions by default using this method.
The list of X-ray lines is stored in
`metadata.Sample.xray_lines`
Parameters
----------
lines : list of strings
A list of valid element X-ray lines to add e.g. Fe_Kb.
Additionally, if `metadata.Sample.elements` is
            defined, add the lines of those elements that were not
given in this list. If the list is empty (default), and
`metadata.Sample.elements` is
defined, add the lines of all those elements.
only_one: bool
If False, add all the lines of each element in
            `metadata.Sample.elements` that have no line
defined in lines. If True (default),
only add the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, only the given lines will be added.
Examples
--------
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.add_lines()
>>> print(s.metadata.Sample.xray_lines)
['Al_Ka', 'C_Ka', 'Cu_La', 'Mn_La', 'Zr_La']
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.set_microscope_parameters(beam_energy=30)
>>> s.add_lines()
>>> print(s.metadata.Sample.xray_lines)
['Al_Ka', 'C_Ka', 'Cu_Ka', 'Mn_Ka', 'Zr_La']
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.add_lines()
>>> print(s.metadata.Sample.xray_lines)
>>> s.add_lines(['Cu_Ka'])
>>> print(s.metadata.Sample.xray_lines)
['Al_Ka', 'C_Ka', 'Cu_La', 'Mn_La', 'Zr_La']
['Al_Ka', 'C_Ka', 'Cu_Ka', 'Cu_La', 'Mn_La', 'Zr_La']
See also
--------
set_lines, add_elements, set_elements
"""
only_lines = utils_eds._parse_only_lines(only_lines)
if "Sample.xray_lines" in self.metadata:
xray_lines = set(self.metadata.Sample.xray_lines)
else:
xray_lines = set()
        # Record the elements whose X-ray lines have been customized
        # so that we don't attempt to add new lines automatically
elements = set()
for line in xray_lines:
elements.add(line.split("_")[0])
for line in lines:
try:
element, subshell = line.split("_")
except ValueError:
raise ValueError(
"Invalid line symbol. "
"Please provide a valid line symbol e.g. Fe_Ka")
if element in elements_db:
elements.add(element)
if subshell in elements_db[element]['Atomic_properties'
]['Xray_lines']:
lines_len = len(xray_lines)
xray_lines.add(line)
if lines_len != len(xray_lines):
_logger.info(f"{line} line added,")
else:
_logger.info(f"{line} line already in.")
else:
raise ValueError(
f"{line} is not a valid line of {element}.")
else:
raise ValueError(
f"{element} is not a valid symbol of an element.")
xray_not_here = self._get_xray_lines_in_spectral_range(xray_lines)[1]
for xray in xray_not_here:
warnings.warn(f"{xray} is not in the data energy range.",
UserWarning)
if "Sample.elements" in self.metadata:
extra_elements = (set(self.metadata.Sample.elements) -
elements)
if extra_elements:
new_lines = self._get_lines_from_elements(
extra_elements,
only_one=only_one,
only_lines=only_lines)
if new_lines:
self.add_lines(list(new_lines) + list(lines))
self.add_elements(elements)
if not hasattr(self.metadata, 'Sample'):
self.metadata.add_node('Sample')
if "Sample.xray_lines" in self.metadata:
xray_lines = xray_lines.union(
self.metadata.Sample.xray_lines)
self.metadata.Sample.xray_lines = sorted(list(xray_lines))
def _get_lines_from_elements(self,
elements,
only_one=False,
only_lines=("a",)):
"""Returns the X-ray lines of the given elements in spectral range
of the data.
Parameters
----------
elements : list of strings
A list containing the symbol of the chemical elements.
only_one : bool
If False, add all the lines of each element in the data spectral
range. If True only add the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, only the given lines will be returned.
Returns
-------
list of X-ray lines alphabetically sorted
"""
only_lines = utils_eds._parse_only_lines(only_lines)
try:
beam_energy = self._get_beam_energy()
except BaseException:
# Fall back to the high_value of the energy axis
beam_energy = self.axes_manager.signal_axes[0].high_value
lines = []
elements = [el if isinstance(el, str) else el.decode()
for el in elements]
for element in elements:
# Possible line (existing and excited by electron)
element_lines = []
for subshell in list(elements_db[element]['Atomic_properties'
]['Xray_lines'].keys()):
if only_lines and subshell not in only_lines:
continue
element_lines.append(element + "_" + subshell)
element_lines = self._get_xray_lines_in_spectral_range(
element_lines)[0]
if only_one and element_lines:
# Choose the best line
select_this = -1
element_lines.sort()
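                # Keep the first (alphabetically sorted) line whose energy is
                # below half the beam energy, i.e. an overvoltage of at least
                # 2; fall back to the last sorted line if none qualifies.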
for i, line in enumerate(element_lines):
if (self._get_line_energy(line) < beam_energy / 2):
select_this = i
break
element_lines = [element_lines[select_this], ]
if not element_lines:
_logger.info(f"There is no X-ray line for element {element} "
"in the data spectral range")
else:
lines.extend(element_lines)
lines.sort()
return lines
def _parse_xray_lines(self, xray_lines, only_one, only_lines):
only_lines = utils_eds._parse_only_lines(only_lines)
xray_lines = self._get_xray_lines(xray_lines, only_one=only_one,
only_lines=only_lines)
xray_lines, xray_not_here = self._get_xray_lines_in_spectral_range(
xray_lines)
for xray in xray_not_here:
warnings.warn(f"{xray} is not in the data energy range. "
"You can remove it with: "
f"`s.metadata.Sample.xray_lines.remove('{xray}')`")
return xray_lines
def get_lines_intensity(self,
xray_lines=None,
integration_windows=2.,
background_windows=None,
plot_result=False,
only_one=True,
only_lines=("a",),
**kwargs):
"""Return the intensity map of selected Xray lines.
The intensities, the number of X-ray counts, are computed by
        summing the spectrum over the
different X-ray lines. The sum window width
is calculated from the energy resolution of the detector
as defined in 'energy_resolution_MnKa' of the metadata.
Backgrounds average in provided windows can be subtracted from the
intensities.
Parameters
----------
xray_lines: {None, Iterable* of strings}
If None,
if `metadata.Sample.elements.xray_lines` contains a
list of lines use those.
If `metadata.Sample.elements.xray_lines` is undefined
or empty but `metadata.Sample.elements` is defined,
use the same syntax as `add_line` to select a subset of lines
for the operation.
Alternatively, provide an iterable containing
a list of valid X-ray lines symbols.
* Note that while dictionaries and strings are iterable,
their use is ambiguous and specifically not allowed.
integration_windows: Float or array
If float, the width of the integration windows is the
'integration_windows_width' times the calculated FWHM of the line.
Else provide an array for which each row corresponds to a X-ray
line. Each row contains the left and right value of the window.
background_windows: None or 2D array of float
If None, no background subtraction. Else, the backgrounds average
in the windows are subtracted from the return intensities.
'background_windows' provides the position of the windows in
            energy. Each line corresponds to an X-ray line. In a line, the
            first two values correspond to the limits of the left window and
            the last two values correspond to the limits of the right window.
plot_result : bool
If True, plot the calculated line intensities. If the current
object is a single spectrum it prints the result instead.
only_one : bool
If False, use all the lines of each element in the data spectral
range. If True use only the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, use only the given lines.
kwargs
The extra keyword arguments for plotting. See
`utils.plot.plot_signals`
Returns
-------
intensities : list
A list containing the intensities as BaseSignal subclasses.
Examples
--------
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.get_lines_intensity(['Mn_Ka'], plot_result=True)
Mn_La at 0.63316 keV : Intensity = 96700.00
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.plot(['Mn_Ka'], integration_windows=2.1)
>>> s.get_lines_intensity(['Mn_Ka'],
>>> integration_windows=2.1, plot_result=True)
Mn_Ka at 5.8987 keV : Intensity = 53597.00
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.set_elements(['Mn'])
>>> s.set_lines(['Mn_Ka'])
>>> bw = s.estimate_background_windows()
>>> s.plot(background_windows=bw)
>>> s.get_lines_intensity(background_windows=bw, plot_result=True)
Mn_Ka at 5.8987 keV : Intensity = 46716.00
See also
--------
set_elements, add_elements, estimate_background_windows,
plot
"""
if xray_lines is not None and \
(not isinstance(xray_lines, Iterable) or \
isinstance(xray_lines, (str, dict))):
raise TypeError(
"xray_lines must be a compatible iterable, but was "
f"mistakenly provided as a {type(xray_lines)}.")
xray_lines = self._parse_xray_lines(xray_lines, only_one, only_lines)
if hasattr(integration_windows, '__iter__') is False:
integration_windows = self.estimate_integration_windows(
windows_width=integration_windows, xray_lines=xray_lines)
intensities = []
ax = self.axes_manager.signal_axes[0]
# test Signal1D (0D problem)
# signal_to_index = self.axes_manager.navigation_dimension - 2
for i, (Xray_line, window) in enumerate(
zip(xray_lines, integration_windows)):
element, line = utils_eds._get_element_and_line(Xray_line)
line_energy = self._get_line_energy(Xray_line)
img = self.isig[window[0]:window[1]].integrate1D(-1)
if np.issubdtype(img.data.dtype, np.integer):
# The operations below require a float dtype with the default
# numpy casting rule ('same_kind')
img.change_dtype("float")
if background_windows is not None:
bw = background_windows[i]
                # TODO: test to prevent slicing bug. To be removed when fixed
indexes = [float(ax.value2index(de))
for de in list(bw) + window]
if indexes[0] == indexes[1]:
bck1 = self.isig[bw[0]]
else:
bck1 = self.isig[bw[0]:bw[1]].integrate1D(-1)
if indexes[2] == indexes[3]:
bck2 = self.isig[bw[2]]
else:
bck2 = self.isig[bw[2]:bw[3]].integrate1D(-1)
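                # Scale the summed side-window counts by the ratio of the
                # integration-window width to the total background-window
                # width, so that the subtracted background matches the number
                # of channels integrated under the line.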
corr_factor = (indexes[5] - indexes[4]) / (
(indexes[1] - indexes[0]) + (indexes[3] - indexes[2]))
img = img - (bck1 + bck2) * corr_factor
img.metadata.General.title = (
f'X-ray line intensity of {self.metadata.General.title}: '
f'{Xray_line} at {line_energy:.2f} '
f'{self.axes_manager.signal_axes[0].units}')
img = img.transpose(signal_axes=[])
if plot_result and img.axes_manager.navigation_size == 1:
if img._lazy:
img.compute()
print(f"{Xray_line} at {line_energy} {ax.units} : "
f"Intensity = {img.data[0]:.2f}")
img.metadata.set_item("Sample.elements", ([element]))
img.metadata.set_item("Sample.xray_lines", ([Xray_line]))
intensities.append(img)
if plot_result and img.axes_manager.navigation_size != 1:
utils.plot.plot_signals(intensities, **kwargs)
return intensities
def get_take_off_angle(self):
"""Calculate the take-off-angle (TOA).
TOA is the angle with which the X-rays leave the surface towards
the detector. Parameters are read in 'SEM.Stage.tilt_alpha',
'Acquisition_instrument.SEM.Detector.EDS.azimuth_angle' and
        'SEM.Detector.EDS.elevation_angle' and 'SEM.Stage.tilt_beta' in
        'metadata'.
Returns
-------
take_off_angle: float
in Degree
Examples
--------
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.get_take_off_angle()
37.0
>>> s.set_microscope_parameters(tilt_stage=20.)
>>> s.get_take_off_angle()
57.0
See also
--------
hs.eds.take_off_angle
"""
if self.metadata.Signal.signal_type == "EDS_SEM":
mp = self.metadata.Acquisition_instrument.SEM
elif self.metadata.Signal.signal_type == "EDS_TEM":
mp = self.metadata.Acquisition_instrument.TEM
tilt_stage = mp.get_item('Stage.tilt_alpha', None)
azimuth_angle = mp.get_item('Detector.EDS.azimuth_angle', None)
elevation_angle = mp.get_item('Detector.EDS.elevation_angle', None)
beta_tilt = mp.get_item('Stage.tilt_beta', 0.0)
return utils.eds.take_off_angle(
tilt_stage,
azimuth_angle,
elevation_angle,
beta_tilt
)
def estimate_integration_windows(self,
windows_width=2.,
xray_lines=None):
"""
Estimate a window of integration for each X-ray line.
Parameters
----------
windows_width: float
The width of the integration windows is the 'windows_width' times
the calculated FWHM of the line.
xray_lines: None or list of string
If None, use 'metadata.Sample.elements.xray_lines'. Else,
provide an iterable containing a list of valid X-ray lines
symbols.
        Returns
        -------
integration_windows: 2D array of float
The positions of the windows in energy. Each row corresponds to a
X-ray line. Each row contains the left and right value of the
window.
Examples
--------
>>> s = hs.datasets.example_signals.EDS_TEM_Spectrum()
>>> s.add_lines()
>>> iw = s.estimate_integration_windows()
>>> s.plot(integration_windows=iw)
>>> s.get_lines_intensity(integration_windows=iw, plot_result=True)
Fe_Ka at 6.4039 keV : Intensity = 3710.00
Pt_La at 9.4421 keV : Intensity = 15872.00
See also
--------
plot, get_lines_intensity
"""
xray_lines = self._get_xray_lines(xray_lines)
integration_windows = []
for Xray_line in xray_lines:
line_energy, line_FWHM = self._get_line_energy(Xray_line,
FWHM_MnKa='auto')
element, line = utils_eds._get_element_and_line(Xray_line)
det = windows_width * line_FWHM / 2.
integration_windows.append([line_energy - det, line_energy + det])
return integration_windows
def estimate_background_windows(self,
line_width=[2, 2],
windows_width=1,
xray_lines=None):
"""
Estimate two windows around each X-ray line containing only the
background.
Parameters
----------
line_width: list of two floats
The position of the two windows around the X-ray line is given by
the `line_width` (left and right) times the calculated FWHM of the
line.
windows_width: float
            The width of the windows is the `windows_width` times the
calculated FWHM of the line.
xray_lines: None or list of string
If None, use `metadata.Sample.elements.xray_lines`. Else,
provide an iterable containing a list of valid X-ray lines
symbols.
        Returns
        -------
windows_position: 2D array of float
            The position of the windows in energy. Each line corresponds to
            an X-ray line. In a line, the first two values correspond to the
            limits of the left window and the last two values correspond to
            the limits of the right window.
Examples
--------
>>> s = hs.datasets.example_signals.EDS_TEM_Spectrum()
>>> s.add_lines()
>>> bw = s.estimate_background_windows(line_width=[5.0, 2.0])
>>> s.plot(background_windows=bw)
>>> s.get_lines_intensity(background_windows=bw, plot_result=True)
Fe_Ka at 6.4039 keV : Intensity = 2754.00
Pt_La at 9.4421 keV : Intensity = 15090.00
See also
--------
plot, get_lines_intensity
"""
xray_lines = self._get_xray_lines(xray_lines)
windows_position = []
for xray_line in xray_lines:
line_energy, line_FWHM = self._get_line_energy(xray_line,
FWHM_MnKa='auto')
tmp = [
line_energy - line_FWHM * line_width[0] -
line_FWHM * windows_width,
line_energy - line_FWHM * line_width[0],
line_energy + line_FWHM * line_width[1],
line_energy + line_FWHM * line_width[1] +
line_FWHM * windows_width
]
windows_position.append(tmp)
windows_position = np.array(windows_position)
        # merge overlapping windows
index = windows_position.argsort(axis=0)[:, 0]
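        # Walk the windows sorted by their lower edge: when the right-hand
        # window of one line overlaps the left-hand window of the next,
        # both lines are given the same fused pair of outer windows.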
for i in range(len(index) - 1):
ia, ib = index[i], index[i + 1]
if windows_position[ia, 2] > windows_position[ib, 0]:
interv = np.append(windows_position[ia, :2],
windows_position[ib, 2:])
windows_position[ia] = interv
windows_position[ib] = interv
return windows_position
def plot(self,
xray_lines=False,
only_lines=("a", "b"),
only_one=False,
background_windows=None,
integration_windows=None,
navigator="auto",
plot_markers=True,
autoscale='v',
norm="auto",
axes_manager=None,
navigator_kwds={},
**kwargs):
"""Plot the EDS spectrum. The following markers can be added
- The position of the X-ray lines and their names.
        - The background windows associated with each X-ray line. A black line
links the left and right window with the average value in each window.
Parameters
----------
xray_lines: {False, True, 'from_elements', list of string}
If not False, indicate the position and the name of the X-ray
lines.
If True, if `metadata.Sample.elements.xray_lines` contains a
list of lines use those. If `metadata.Sample.elements.xray_lines`
is undefined or empty or if xray_lines equals 'from_elements' and
`metadata.Sample.elements` is defined, use the same syntax as
`add_line` to select a subset of lines for the operation.
Alternatively, provide an iterable containing a list of valid X-ray
lines symbols.
only_lines : None or list of strings
If not None, use only the given lines (eg. ('a','Kb')).
If None, use all lines.
only_one : bool
If False, use all the lines of each element in the data spectral
range. If True use only the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
background_windows: None or 2D array of float
If not None, add markers at the position of the windows in energy.
            Each line corresponds to an X-ray line. In a line, the first two
            values correspond to the limits of the left window and the last
            two values correspond to the limits of the right window.
integration_windows: None or 'auto' or float or 2D array of float
If not None, add markers at the position of the integration
windows.
If 'auto' (or float), the width of the integration windows is 2.0
(or float) times the calculated FWHM of the line. see
'estimate_integration_windows'.
Else provide an array for which each row corresponds to a X-ray
line. Each row contains the left and right value of the window.
%s
%s
Examples
--------
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.plot()
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.plot(True)
>>> s = hs.datasets.example_signals.EDS_TEM_Spectrum()
>>> s.add_lines()
>>> bw = s.estimate_background_windows()
>>> s.plot(background_windows=bw)
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.plot(['Mn_Ka'], integration_windows='auto')
>>> s = hs.datasets.example_signals.EDS_TEM_Spectrum()
>>> s.add_lines()
>>> bw = s.estimate_background_windows()
>>> s.plot(background_windows=bw, integration_windows=2.1)
See also
--------
set_elements, add_elements, estimate_integration_windows,
get_lines_intensity, estimate_background_windows
"""
super().plot(navigator=navigator,
plot_markers=plot_markers,
autoscale=autoscale,
norm=norm,
axes_manager=axes_manager,
navigator_kwds=navigator_kwds,
**kwargs)
self._plot_xray_lines(xray_lines, only_lines, only_one,
background_windows, integration_windows,
render_figure=False)
self._render_figure(plot=['signal_plot'])
plot.__doc__ %= (BASE_PLOT_DOCSTRING_PARAMETERS,
PLOT1D_DOCSTRING)
def _plot_xray_lines(self, xray_lines=False, only_lines=("a", "b"),
only_one=False, background_windows=None,
integration_windows=None, render_figure=True):
if (xray_lines is not False or
background_windows is not None or
integration_windows is not None):
if xray_lines is False:
xray_lines = True
only_lines = utils_eds._parse_only_lines(only_lines)
if xray_lines is True or xray_lines == 'from_elements':
if ('Sample.xray_lines' in self.metadata and
xray_lines != 'from_elements'):
xray_lines = self.metadata.Sample.xray_lines
elif 'Sample.elements' in self.metadata:
xray_lines = self._get_lines_from_elements(
self.metadata.Sample.elements,
only_one=only_one,
only_lines=only_lines)
else:
_logger.warning(
"No elements defined, set them with `add_elements`")
# No X-rays lines, nothing to do then
return
xray_lines, xray_not_here = self._get_xray_lines_in_spectral_range(
xray_lines)
for xray in xray_not_here:
_logger.warning(f"{xray} is not in the data energy range.")
xray_lines = np.unique(xray_lines)
self.add_xray_lines_markers(xray_lines, render_figure=False)
if background_windows is not None:
self._add_background_windows_markers(background_windows,
render_figure=False)
if integration_windows is not None:
if integration_windows == 'auto':
integration_windows = 2.0
if hasattr(integration_windows, '__iter__') is False:
integration_windows = self.estimate_integration_windows(
windows_width=integration_windows,
xray_lines=xray_lines)
self._add_vertical_lines_groups(integration_windows,
linestyle='--',
render_figure=False)
# Render figure only at the end
if render_figure:
self._render_figure(plot=['signal_plot'])
def _add_vertical_lines_groups(self, position, render_figure=True,
**kwargs):
"""
        Add vertical line markers; all lines within a group share one color.
Parameters
----------
position: 2D array of float
The position on the signal axis. Each row corresponds to a
group.
kwargs
            Keyword arguments passed to markers.vertical_line.
"""
per_xray = len(position[0])
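        # Repeating the color list 'per_xray' times and sorting it places
        # identical colors next to each other, so the vertical lines drawn
        # from one row of `position` all share a single color.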
colors = itertools.cycle(np.sort(
plt.rcParams['axes.prop_cycle'].by_key()["color"] * per_xray))
for x, color in zip(np.ravel(position), colors):
line = markers.vertical_line(x=x, color=color, **kwargs)
self.add_marker(line, render_figure=False)
if render_figure:
self._render_figure(plot=['signal_plot'])
def add_xray_lines_markers(self, xray_lines, render_figure=True):
"""
        Add markers on a spec.plot() with the names of the selected X-ray
        lines.
Parameters
----------
xray_lines: list of string
A valid list of X-ray lines
"""
if self._plot is None or not self._plot.is_active:
raise RuntimeError("The signal needs to be plotted.")
# in case of log scale, if some lines have intensity zero, then
# the line and label will not be displayed.
norm = self._plot.signal_plot.ax_lines[0].norm
minimum_intensity = self.data[self.data>0].min() if norm == 'log' else 0
line_energy = []
intensity = []
for xray_line in xray_lines:
element, line = utils_eds._get_element_and_line(xray_line)
line_energy.append(self._get_line_energy(xray_line))
relative_factor = elements_db[element][
'Atomic_properties']['Xray_lines'][line]['weight']
a_eng = self._get_line_energy(f'{element}_{line[0]}a')
idx = self.axes_manager.signal_axes[0].value2index(a_eng)
intensity.append(self.data[..., idx] * relative_factor)
for i in range(len(line_energy)):
# When using `log` norm, clip value to minimum value > 0
if norm == 'log':
intensity_ = np.max(
intensity[i], axis=-1, initial=minimum_intensity
)
else:
intensity_ = intensity[i]
line = markers.vertical_line_segment(
x=line_energy[i], y1=None, y2=intensity_ * 0.8)
self.add_marker(line, render_figure=False)
string = (r'$\mathrm{%s}_{\mathrm{%s}}$' %
utils_eds._get_element_and_line(xray_lines[i]))
text = markers.text(
x=line_energy[i],
y=intensity_ * 1.1,
text=string,
rotation=90)
self.add_marker(text, render_figure=False)
self._xray_markers[xray_lines[i]] = [line, text]
line.events.closed.connect(self._xray_marker_closed)
text.events.closed.connect(self._xray_marker_closed)
if render_figure:
self._render_figure(plot=['signal_plot'])
def _xray_marker_closed(self, obj):
marker = obj
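        # Iterate over a snapshot of the dict items so that entries can be
        # popped from self._xray_markers while looping.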
for xray_line, line_markers in reversed(list(
self._xray_markers.items())):
if marker in line_markers:
line_markers.remove(marker)
if not line_markers:
self._xray_markers.pop(xray_line)
def remove_xray_lines_markers(self, xray_lines, render_figure=True):
"""
        Remove markers previously added on a spec.plot() with the name of the
selected X-ray lines
Parameters
----------
xray_lines: list of string
A valid list of X-ray lines to remove
"""
for xray_line in xray_lines:
if xray_line in self._xray_markers:
line_markers = self._xray_markers[xray_line]
while line_markers:
m = line_markers.pop()
m.close(render_figure=False)
if render_figure:
self._render_figure(plot=['signal_plot'])
def _add_background_windows_markers(self, windows_position,
render_figure=True):
"""
Plot the background windows associated with each X-ray lines.
For X-ray lines, a black line links the left and right window with the
average value in each window.
Parameters
----------
windows_position: 2D array of float
            The position of the windows in energy. Each line corresponds to
            an X-ray line. In a line, the first two values correspond to the
            limits of the left window and the last two values correspond to
            the limits of the right window.
See also
--------
estimate_background_windows, get_lines_intensity
"""
self._add_vertical_lines_groups(windows_position)
ax = self.axes_manager.signal_axes[0]
for bw in windows_position:
            # TODO: test to prevent slicing bug. To be removed when fixed
if ax.value2index(bw[0]) == ax.value2index(bw[1]):
y1 = self.isig[bw[0]].data
else:
y1 = self.isig[bw[0]:bw[1]].mean(-1).data
if ax.value2index(bw[2]) == ax.value2index(bw[3]):
y2 = self.isig[bw[2]].data
else:
y2 = self.isig[bw[2]:bw[3]].mean(-1).data
line = markers.line_segment(
x1=(bw[0] + bw[1]) / 2., x2=(bw[2] + bw[3]) / 2.,
y1=y1, y2=y2, color='black')
self.add_marker(line, render_figure=False)
if render_figure:
self._render_figure(plot=['signal_plot'])
class LazyEDSSpectrum(EDSSpectrum, LazySignal1D):
pass
| gpl-3.0 |
Ldpe2G/mxnet | example/torch/torch_module.py | 15 | 1651 | # pylint: skip-file
from data import mnist_iterator
import mxnet as mx
import numpy as np
import logging
# define mlp
use_torch_criterion = False
data = mx.symbol.Variable('data')
fc1 = mx.symbol.TorchModule(data_0=data, lua_string='nn.Linear(784, 128)', num_data=1, num_params=2, num_outputs=1, name='fc1')
act1 = mx.symbol.TorchModule(data_0=fc1, lua_string='nn.ReLU(false)', num_data=1, num_params=0, num_outputs=1, name='relu1')
fc2 = mx.symbol.TorchModule(data_0=act1, lua_string='nn.Linear(128, 64)', num_data=1, num_params=2, num_outputs=1, name='fc2')
act2 = mx.symbol.TorchModule(data_0=fc2, lua_string='nn.ReLU(false)', num_data=1, num_params=0, num_outputs=1, name='relu2')
fc3 = mx.symbol.TorchModule(data_0=act2, lua_string='nn.Linear(64, 10)', num_data=1, num_params=2, num_outputs=1, name='fc3')
if use_torch_criterion:
logsoftmax = mx.symbol.TorchModule(data_0=fc3, lua_string='nn.LogSoftMax()', num_data=1, num_params=0, num_outputs=1, name='logsoftmax')
# Torch's label starts from 1
label = mx.symbol.Variable('softmax_label') + 1
mlp = mx.symbol.TorchCriterion(data=logsoftmax, label=label, lua_string='nn.ClassNLLCriterion()', name='softmax')
else:
mlp = mx.symbol.SoftmaxOutput(data=fc3, name='softmax')
# data
train, val = mnist_iterator(batch_size=100, input_shape = (784,))
# train
logging.basicConfig(level=logging.DEBUG)
model = mx.model.FeedForward(
ctx = mx.cpu(0), symbol = mlp, num_epoch = 20,
learning_rate = 0.1, momentum = 0.9, wd = 0.00001)
if use_torch_criterion:
model.fit(X=train, eval_data=val, eval_metric=mx.metric.Torch())
else:
model.fit(X=train, eval_data=val)
| apache-2.0 |
Ldpe2G/mxnet | python/mxnet/__init__.py | 9 | 1519 | #!/usr/bin/env python
# coding: utf-8
"""MXNet: a concise, fast and flexible framework for deep learning."""
from __future__ import absolute_import
from .context import Context, current_context, cpu, gpu
from .base import MXNetError
from . import base
from . import contrib
from . import ndarray
from . import name
# use mx.sym as short for symbol
from . import symbol as sym
from . import symbol
from . import symbol_doc
from . import io
from . import recordio
from . import operator
# use mx.nd as short for mx.ndarray
from . import ndarray as nd
# use mx.rnd as short for mx.random
from . import random as rnd
from . import random
from . import optimizer
from . import model
from . import notebook
from . import initializer
# use mx.init as short for mx.initializer
from . import initializer as init
from . import visualization
# use viz as short for mx.visualization
from . import visualization as viz
from . import callback
# from . import misc
from . import lr_scheduler
# use mx.kv as short for kvstore
from . import kvstore as kv
from . import kvstore_server
# Runtime compile module
from .rtc import Rtc as rtc
# Attribute scope to add attributes to symbolic graphs
from .attribute import AttrScope
from . import monitor
from . import monitor as mon
from . import torch
from . import torch as th
from . import profiler
from . import log
from . import module
from . import module as mod
from . import image
from . import image as img
from . import test_utils
from . import rnn
__version__ = base.__version__
| apache-2.0 |
justincassidy/scikit-learn | examples/linear_model/plot_robust_fit.py | 237 | 2414 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The mean squared error on non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in direction X and y, but has
  a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
    plt.legend(loc='best', frameon=False,
               title='Error: mean squared error\n to non-corrupt data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
QijunPan/ansible | lib/ansible/modules/storage/zfs/zfs_facts.py | 9 | 8678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam Števko <adam.stevko@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: zfs_facts
short_description: Gather facts about ZFS datasets.
description:
- Gather facts from ZFS dataset properties.
version_added: "2.3"
author: Adam Števko (@xen0l)
options:
name:
description:
- ZFS dataset name.
alias: [ "ds", "dataset" ]
type: str
required: yes
recurse:
description:
- Specifies if properties for any children should be recursively
displayed.
type: bool
default: False
required: false
parsable:
description:
- Specifies if property values should be displayed in machine
friendly format.
type: bool
default: False
required: false
properties:
description:
- Specifies which dataset properties should be queried in comma-separated format.
For more information about dataset properties, check zfs(1M) man page.
alias: [ "props" ]
type: str
default: all
required: false
type:
description:
- Specifies which datasets types to display. Multiple values have to be
provided in comma-separated form.
alias: [ "props" ]
type: str
default: all
choices: [ 'all', 'filesystem', 'volume', 'snapshot', 'bookmark' ]
required: false
depth:
description:
        - Specifies recursion depth.
type: int
default: None
required: false
'''
EXAMPLES = '''
- name: Gather facts about ZFS dataset rpool/export/home
  zfs_facts: dataset=rpool/export/home
- name: Report space usage on ZFS filesystems under data/home
  zfs_facts: name=data/home recurse=yes type=filesystem
- debug: msg='ZFS dataset {{ item.name }} consumes {{ item.used }} of disk space.'
  with_items: '{{ ansible_zfs_datasets }}'
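# An additional illustrative example (hypothetical pool name 'tank'):
# gather machine-friendly values no more than two levels deep.
- name: Gather parsable facts up to two levels below tank
  zfs_facts:
    name: tank
    recurse: yes
    parsable: yes
    depth: 2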
'''
RETURN = '''
name:
description: ZFS dataset name
returned: always
type: string
sample: rpool/var/spool
parsable:
description: if parsable output should be provided in machine friendly format.
returned: if 'parsable' is set to True
type: boolean
sample: True
recurse:
description: if we should recurse over ZFS dataset
returned: if 'recurse' is set to True
type: boolean
sample: True
zfs_datasets:
description: ZFS dataset facts
returned: always
type: string
sample:
{
"aclinherit": "restricted",
"aclmode": "discard",
"atime": "on",
"available": "43.8G",
"canmount": "on",
"casesensitivity": "sensitive",
"checksum": "on",
"compression": "off",
"compressratio": "1.00x",
"copies": "1",
"creation": "Thu Jun 16 11:37 2016",
"dedup": "off",
"devices": "on",
"exec": "on",
"filesystem_count": "none",
"filesystem_limit": "none",
"logbias": "latency",
"logicalreferenced": "18.5K",
"logicalused": "3.45G",
"mlslabel": "none",
"mounted": "yes",
"mountpoint": "/rpool",
"name": "rpool",
"nbmand": "off",
"normalization": "none",
"org.openindiana.caiman:install": "ready",
"primarycache": "all",
"quota": "none",
"readonly": "off",
"recordsize": "128K",
"redundant_metadata": "all",
"refcompressratio": "1.00x",
"referenced": "29.5K",
"refquota": "none",
"refreservation": "none",
"reservation": "none",
"secondarycache": "all",
"setuid": "on",
"sharenfs": "off",
"sharesmb": "off",
"snapdir": "hidden",
"snapshot_count": "none",
"snapshot_limit": "none",
"sync": "standard",
"type": "filesystem",
"used": "4.41G",
"usedbychildren": "4.41G",
"usedbydataset": "29.5K",
"usedbyrefreservation": "0",
"usedbysnapshots": "0",
"utf8only": "off",
"version": "5",
"vscan": "off",
"written": "29.5K",
"xattr": "on",
"zoned": "off"
}
'''
import os
from collections import defaultdict
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import AnsibleModule
SUPPORTED_TYPES = ['all', 'filesystem', 'volume', 'snapshot', 'bookmark']
class ZFSFacts(object):
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.recurse = module.params['recurse']
self.parsable = module.params['parsable']
self.properties = module.params['properties']
self.type = module.params['type']
self.depth = module.params['depth']
self._datasets = defaultdict(dict)
self.facts = []
def dataset_exists(self):
cmd = [self.module.get_bin_path('zfs')]
cmd.append('list')
cmd.append(self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
return True
else:
return False
def get_facts(self):
cmd = [self.module.get_bin_path('zfs')]
cmd.append('get')
cmd.append('-H')
if self.parsable:
cmd.append('-p')
if self.recurse:
cmd.append('-r')
if int(self.depth) != 0:
cmd.append('-d')
cmd.append('%s' % self.depth)
if self.type:
cmd.append('-t')
cmd.append(self.type)
cmd.append('-o')
cmd.append('name,property,value')
cmd.append(self.properties)
cmd.append(self.name)
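        # With every option enabled the assembled command resembles
        # (hypothetical dataset 'tank/home'):
        #   zfs get -H -p -r -d 2 -t filesystem -o name,property,value all tank/home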
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
for line in out.splitlines():
dataset, property, value = line.split('\t')
self._datasets[dataset].update({property: value})
for k, v in iteritems(self._datasets):
v.update({'name': k})
self.facts.append(v)
return {'ansible_zfs_datasets': self.facts}
else:
self.module.fail_json(msg='Error while trying to get facts about ZFS dataset: %s' % self.name,
stderr=err,
rc=rc)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, aliases=['ds', 'dataset'], type='str'),
recurse=dict(required=False, default=False, type='bool'),
parsable=dict(required=False, default=False, type='bool'),
properties=dict(required=False, default='all', type='str'),
type=dict(required=False, default='all', type='str', choices=SUPPORTED_TYPES),
depth=dict(required=False, default=0, type='int')
),
supports_check_mode=True
)
zfs_facts = ZFSFacts(module)
result = {}
result['changed'] = False
result['name'] = zfs_facts.name
if zfs_facts.parsable:
result['parsable'] = zfs_facts.parsable
if zfs_facts.recurse:
result['recurse'] = zfs_facts.recurse
if zfs_facts.dataset_exists():
result['ansible_facts'] = zfs_facts.get_facts()
else:
module.fail_json(msg='ZFS dataset %s does not exist!' % zfs_facts.name)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
fishcorn/pylearn2 | pylearn2/datasets/tests/test_mnistplus.py | 35 | 1978 | """
This file tests the MNISTPlus class, mainly concerning the X and y members
of the dataset and their corresponding sizes, data scales and topological
views.
"""
from pylearn2.datasets.mnistplus import MNISTPlus
from pylearn2.space import IndexSpace, VectorSpace
import unittest
from pylearn2.testing.skip import skip_if_no_data
import numpy as np
def test_MNISTPlus():
"""
    Test the MNISTPlus wrapper.
    Tests the scale of the data and the splitting into train, valid and test
    sets.
    Tests that a topological batch has 4 dimensions.
    Tests that it works well with the selected types of augmentation.
"""
skip_if_no_data()
for subset in ['train', 'valid', 'test']:
ids = MNISTPlus(which_set=subset)
assert 0.01 >= ids.X.min() >= 0.0
assert 0.99 <= ids.X.max() <= 1.0
topo = ids.get_batch_topo(1)
assert topo.ndim == 4
del ids
train_y = MNISTPlus(which_set='train', label_type='label')
assert 0.99 <= train_y.X.max() <= 1.0
assert 0.0 <= train_y.X.min() <= 0.01
assert train_y.y.max() == 9
assert train_y.y.min() == 0
assert train_y.y.shape == (train_y.X.shape[0], 1)
train_y = MNISTPlus(which_set='train', label_type='azimuth')
assert 0.99 <= train_y.X.max() <= 1.0
assert 0.0 <= train_y.X.min() <= 0.01
assert 0.0 <= train_y.y.max() <= 1.0
assert 0.0 <= train_y.y.min() <= 1.0
assert train_y.y.shape == (train_y.X.shape[0], 1)
train_y = MNISTPlus(which_set='train', label_type='rotation')
assert 0.99 <= train_y.X.max() <= 1.0
assert 0.0 <= train_y.X.min() <= 0.01
assert train_y.y.max() == 9
assert train_y.y.min() == 0
assert train_y.y.shape == (train_y.X.shape[0], 1)
train_y = MNISTPlus(which_set='train', label_type='texture_id')
assert 0.99 <= train_y.X.max() <= 1.0
assert 0.0 <= train_y.X.min() <= 0.01
assert train_y.y.max() == 9
assert train_y.y.min() == 0
assert train_y.y.shape == (train_y.X.shape[0], 1)
| bsd-3-clause |
quheng/scikit-learn | sklearn/datasets/tests/test_20news.py | 277 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
fishcorn/pylearn2 | pylearn2/train_extensions/window_flip.py | 41 | 7218 | """ TrainExtensions for doing random spatial windowing and flipping of an
image dataset on every epoch. TODO: fill out properly."""
import warnings
import numpy
from . import TrainExtension
from pylearn2.datasets.preprocessing import CentralWindow
from pylearn2.utils.exc import reraise_as
from pylearn2.utils.rng import make_np_rng
from pylearn2.utils import py_integer_types
try:
from ..utils._window_flip import random_window_and_flip_c01b
from ..utils._window_flip import random_window_and_flip_b01c
except ImportError:
reraise_as(ImportError("Import of Cython module failed. Please make sure "
"you have run 'python setup.py develop' in the "
"pylearn2 directory"))
__authors__ = "David Warde-Farley"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["David Warde-Farley"]
__license__ = "3-clause BSD"
__maintainer__ = "David Warde-Farley"
__email__ = "wardefar@iro"
def _zero_pad(array, amount, axes=(1, 2)):
"""
Returns a copy of <array> with zero-filled padding around the margins.
The new array has the same dimensions as the input array, except for
the dimensions given by <axes>, which are increased by 2*<amount>.
Parameters
----------
array: numpy.ndarray
The array to zero-pad.
amount: int
The number of zeros to append to the beginning and end of each dimension
in <axes>. (That axis will grow by 2*<amount>).
axes: tuple
The dimensions to pad. These are indices, not axis names like the 0, 1
in ('b', 0, 1, 'c').
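    Examples
    --------
    A minimal illustrative sketch (values chosen only for demonstration):
    >>> a = numpy.ones((1, 2, 2))
    >>> _zero_pad(a, 1).shape
    (1, 4, 4)
    >>> _zero_pad(a, 1)[0]
    array([[ 0.,  0.,  0.,  0.],
           [ 0.,  1.,  1.,  0.],
           [ 0.,  1.,  1.,  0.],
           [ 0.,  0.,  0.,  0.]])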
"""
if amount == 0:
return array
new_shape = []
slices = []
for i, s in enumerate(array.shape):
if i in axes:
new_shape.append(s + 2 * amount)
slices.append(slice(amount, -amount))
else:
new_shape.append(s)
slices.append(slice(None))
new_shape = tuple(new_shape)
slices = tuple(slices)
new_array = numpy.zeros(new_shape, dtype=array.dtype)
new_array[slices] = array
return new_array
class WindowAndFlip(TrainExtension):
"""
An extension that allows an image dataset to be flipped and
windowed after each epoch of training.
Parameters
----------
window_shape : WRITEME
randomize : list, optional
If specified, a list of Datasets to randomly window and
flip at each epoch.
randomize_once : list, optional
If specified, a list of Datasets to randomly window and
flip once at the start of training.
center : list, optional
If specified, a list of Datasets to centrally window
once at the start of training.
rng : numpy.random.RandomState object or seed, optional
A random number generator or seed used to create one.
Seeded deterministically by default.
pad_randomized : int, optional
Amount of padding to add to each side of the images
in `randomize` and `randomize_once`. Useful if you
want to do zero-padded windowing with `window_shape`
the actual size of the dataset, and validate/test on
full-size images instead of central patches. Default
is 0.
flip : bool, optional
Reflect images on the horizontal axis with probability
0.5. `True` by default.
"""
def __init__(self,
window_shape,
randomize=None,
randomize_once=None,
center=None,
rng=(2013, 2, 20),
pad_randomized=0,
flip=True):
self._window_shape = tuple(window_shape)
# Defined in setup(). A dict that maps Datasets in self._randomize and
# self._randomize_once to zero-padded versions of their topological
# views.
self._original = None
self._randomize = randomize if randomize else []
self._randomize_once = randomize_once if randomize_once else []
self._center = center if center else []
self._pad_randomized = pad_randomized
self._flip = flip
assert isinstance(self._randomize, list), (
"The 'randomize' parameter of WindowAndFlip should be a list")
assert isinstance(self._randomize_once, list), (
"The 'randomize_once' parameter of WindowAndFlip should be a list")
assert isinstance(self._center, list), (
"The 'center' parameter of WindowAndFlip should be a list")
assert isinstance(self._pad_randomized, py_integer_types), (
"The 'pad_randomized' parameter of WindowAndFlip should be an int")
if randomize is None and randomize_once is None and center is None:
warnings.warn(self.__class__.__name__ + " instantiated without "
"any dataset arguments, and therefore does nothing",
stacklevel=2)
self._rng = make_np_rng(rng, which_method="random_integers")
def setup(self, model, dataset, algorithm):
"""
.. todo::
WRITEME
Notes
-----
`dataset` argument is ignored
"""
dataset = None
# Central windowing of auxiliary datasets (e.g. validation sets)
preprocessor = CentralWindow(self._window_shape)
for data in self._center:
preprocessor.apply(data)
#
# Do the initial random windowing
#
randomize_now = self._randomize + self._randomize_once
# maps each dataset in randomize_now to a zero-padded topological view
# of its data.
self._original = dict((data, _zero_pad(
data.get_topological_view().astype('float32'),
self._pad_randomized))
for data in randomize_now)
# For each dataset, for each image, extract a randomly positioned and
# potentially horizontal-flipped window
self.randomize_datasets(randomize_now)
def randomize_datasets(self, datasets):
"""
Applies random translations and flips to the selected datasets.
Parameters
----------
datasets : WRITEME
"""
for dataset in datasets:
if tuple(dataset.view_converter.axes) == ('c', 0, 1, 'b'):
wf_func = random_window_and_flip_c01b
elif tuple(dataset.view_converter.axes) == ('b', 0, 1, 'c'):
wf_func = random_window_and_flip_b01c
else:
raise ValueError("Axes of dataset is not supported: %s" %
(str(dataset.view_converter.axes)))
arr = wf_func(self._original[dataset],
self._window_shape,
rng=self._rng, flip=self._flip)
dataset.set_topological_view(arr, axes=dataset.view_converter.axes)
def on_monitor(self, model, dataset, algorithm):
"""
.. todo::
WRITEME
Notes
-----
All arguments are ignored.
"""
model = None
dataset = None
algorithm = None
self.randomize_datasets(self._randomize)
| bsd-3-clause |
justincassidy/scikit-learn | benchmarks/bench_glmnet.py | 295 | 3848 | """
To run this, you'll need to have the following installed.
* glmnet-python
* scikit-learn (of course)
It runs two benchmarks.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
    n_informative = n_features // 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
    pl.xlabel('number of training samples')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
        n_informative = n_features // 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
Lawrence-Liu/scikit-learn | examples/classification/plot_classification_probability.py | 241 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
adammenges/statsmodels | docs/source/plots/graphics_gofplots_qqplot.py | 37 | 1911 | # -*- coding: utf-8 -*-
"""
Created on Sun May 06 05:32:15 2012
Author: Josef Perktold
edited by: Paul Hobson (2012-08-19)
"""
from scipy import stats
from matplotlib import pyplot as plt
import statsmodels.api as sm
#example from docstring
data = sm.datasets.longley.load()
data.exog = sm.add_constant(data.exog, prepend=True)
mod_fit = sm.OLS(data.endog, data.exog).fit()
res = mod_fit.resid
left = -1.8 #x coordinate for text insert
fig = plt.figure()
ax = fig.add_subplot(2, 2, 1)
sm.graphics.qqplot(res, ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, 'no keywords', verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 2)
sm.graphics.qqplot(res, line='s', ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='s'", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 3)
sm.graphics.qqplot(res, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='45', \nfit=True", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 4)
sm.graphics.qqplot(res, dist=stats.t, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "dist=stats.t, \nline='45', \nfit=True",
verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
fig.tight_layout()
plt.gcf()
# example with the new ProbPlot class
import numpy as np
x = np.random.normal(loc=8.25, scale=3.5, size=37)
y = np.random.normal(loc=8.00, scale=3.25, size=37)
pp_x = sm.ProbPlot(x, fit=True)
pp_y = sm.ProbPlot(y, fit=True)
# probability of exceedance
fig2 = pp_x.probplot(exceed=True)
# compare x quantiles to y quantiles
fig3 = pp_x.qqplot(other=pp_y, line='45')
# same as above with probabilities/percentiles
fig4 = pp_x.ppplot(other=pp_y, line='45')
| bsd-3-clause |
ITA-Solar/helita | helita/io/sdf.py | 2 | 3207 | """
Set of tools to read SDF format.
First coded: 20111227 by Tiago Pereira (tiago.pereira@nasa.gov)
"""
import numpy as np
class SDFHeader:
def __init__(self, filename, verbose=False):
self.verbose = verbose
self.query(filename)
def query(self, filename, verbose=False):
''' Queries the file, returning datasets and shapes.'''
        f = open(filename, 'rb')
        h = f.read(11)
        hdstr = h[:-1].decode()
if hdstr != 'SDF format':
raise IOError('SDF header not found in' +
' %s, probably wrong or corrupt file.' % filename)
self.hdrpos = np.fromfile(f, dtype='>l', count=1)[0]
self.datapos = np.fromfile(f, dtype='>l', count=1)[0]
self.norder = np.fromfile(f, dtype='>i', count=1)[0]
self.hdrsize = np.fromfile(f, dtype='>l', count=1)[0]
        header = f.read(self.hdrpos - f.tell()).decode()
self.header = header
if self.verbose:
print(header)
f.close()
self.header_data(header)
return
def header_data(self, header):
        ''' Breaks header string into variable information. '''
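        # Each header line is assumed (from the parsing below) to follow
        #   <order> <label> <typechar> <nbpw> <ndims> <dim1> [<dim2> ...]
        # e.g. a hypothetical line "0 temperature f 4 3 512 512 128" would
        # describe a big-endian float32 ('>f4') array of shape (512, 512, 128).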
self.variables = {}
offset = 19 + self.hdrsize
for line in header.split('\n')[:-1]:
l = line.split()
label = l.pop(1)
order = int(l[0])
dtype = '>' + l[1] + l[2] # force big endian
nbpw = int(l[2])
ndims = int(l[3])
shape = ()
for i in range(ndims):
shape += (int(l[4 + i]),)
nbytes = nbpw * np.prod(shape)
if dtype[1] == 'c':
nbytes *= 2
if dtype[1:] == 'c4': # these are the same internally to numpy
dtype = '>c8'
self.variables[label] = [order, dtype, nbpw, offset, shape]
offset += nbytes
return
def getvar(filename, variable, memmap=False):
''' Reads variable from SDF file.
IN:
filename - string with filename
variable - string with variable name
    memmap - [OPTIONAL] boolean. If True, will return a memmap object
(ie, data is only loaded into memory when needed)
OUT:
data - array with data
'''
ff = SDFHeader(filename, verbose=False)
if variable not in ff.variables:
raise KeyError(
'(EEE) getvar: variable %s not found in %s' %
(variable, filename))
order, dtype, nbpw, offset, shape = ff.variables[variable]
if memmap:
data = np.memmap(filename, dtype=dtype, mode='r', shape=shape,
offset=offset, order='F')
else:
        f = open(filename, 'rb')
f.seek(offset)
data = np.fromfile(f, dtype=dtype,
count=np.prod(shape)).reshape(shape[::-1]).T
f.close()
return data
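# Minimal usage sketch (file and variable names are hypothetical):
#
#     temperature = getvar('snapshot.sdf', 'temperature', memmap=True)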
def getall(filename, memmap=False):
''' Reads all the variables of an SDF file. Loads into a dictionary indexed
by variable name. '''
ff = SDFHeader(filename, verbose=False)
result = {}
for v in ff.variables:
result[v] = getvar(filename, v, memmap)
return result
| bsd-3-clause |
saurabh3949/mxnet | example/image-classification/symbols/resnext.py | 56 | 9928 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Adapted from https://github.com/tornadomeet/ResNet/blob/master/symbol_resnet.py
Original author Wei Wu
Implemented the following paper:
Saining Xie, Ross Girshick, Piotr Dollar, Zhuowen Tu, Kaiming He. "Aggregated Residual Transformations for Deep Neural Networks"
'''
import mxnet as mx
import numpy as np
def residual_unit(data, num_filter, stride, dim_match, name, bottle_neck=True, num_group=32, bn_mom=0.9, workspace=256, memonger=False):
"""Return ResNet Unit symbol for building ResNet
Parameters
----------
data : str
Input data
num_filter : int
Number of output channels
bnf : int
Bottle neck channels factor with regard to num_filter
stride : tuple
Stride used in convolution
dim_match : Boolean
True means channel number between input and output is the same, otherwise means differ
name : str
Base name of the operators
workspace : int
Workspace used in convolution operator
"""
if bottle_neck:
# the same as https://github.com/facebook/fb.resnet.torch#notes, a bit difference with origin paper
conv1 = mx.sym.Convolution(data=data, num_filter=int(num_filter*0.5), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv2 = mx.sym.Convolution(data=act1, num_filter=int(num_filter*0.5), num_group=num_group, kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
conv3 = mx.sym.Convolution(data=act2, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
workspace=workspace, name=name + '_conv3')
bn3 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
if dim_match:
shortcut = data
else:
shortcut_conv = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
shortcut = mx.sym.BatchNorm(data=shortcut_conv, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc_bn')
if memonger:
shortcut._set_attr(mirror_stage='True')
eltwise = bn3 + shortcut
return mx.sym.Activation(data=eltwise, act_type='relu', name=name + '_relu')
else:
conv1 = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv2 = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
if dim_match:
shortcut = data
else:
shortcut_conv = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
shortcut = mx.sym.BatchNorm(data=shortcut_conv, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc_bn')
if memonger:
shortcut._set_attr(mirror_stage='True')
eltwise = bn2 + shortcut
return mx.sym.Activation(data=eltwise, act_type='relu', name=name + '_relu')
def resnext(units, num_stages, filter_list, num_classes, num_group, image_shape, bottle_neck=True, bn_mom=0.9, workspace=256, dtype='float32', memonger=False):
"""Return ResNeXt symbol of
Parameters
----------
units : list
Number of units in each stage
num_stages : int
Number of stage
filter_list : list
Channel size of each stage
num_classes : int
        Output size of symbol
    num_group : int
        Number of conv groups
    dataset : str
        Dataset type, only cifar10 and imagenet are supported
workspace : int
Workspace used in convolution operator
dtype : str
Precision (float32 or float16)
"""
num_unit = len(units)
assert(num_unit == num_stages)
data = mx.sym.Variable(name='data')
if dtype == 'float32':
data = mx.sym.identity(data=data, name='id')
else:
if dtype == 'float16':
data = mx.sym.Cast(data=data, dtype=np.float16)
data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=2e-5, momentum=bn_mom, name='bn_data')
(nchannel, height, width) = image_shape
if height <= 32: # such as cifar10
body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(3, 3), stride=(1,1), pad=(1, 1),
no_bias=True, name="conv0", workspace=workspace)
else: # often expected to be 224 such as imagenet
body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(7, 7), stride=(2,2), pad=(3, 3),
no_bias=True, name="conv0", workspace=workspace)
body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
body = mx.sym.Activation(data=body, act_type='relu', name='relu0')
body = mx.sym.Pooling(data=body, kernel=(3, 3), stride=(2,2), pad=(1,1), pool_type='max')
for i in range(num_stages):
body = residual_unit(body, filter_list[i+1], (1 if i==0 else 2, 1 if i==0 else 2), False,
name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck, num_group=num_group,
bn_mom=bn_mom, workspace=workspace, memonger=memonger)
for j in range(units[i]-1):
body = residual_unit(body, filter_list[i+1], (1,1), True, name='stage%d_unit%d' % (i + 1, j + 2),
bottle_neck=bottle_neck, num_group=num_group, bn_mom=bn_mom, workspace=workspace, memonger=memonger)
pool1 = mx.sym.Pooling(data=body, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
flat = mx.sym.Flatten(data=pool1)
fc1 = mx.sym.FullyConnected(data=flat, num_hidden=num_classes, name='fc1')
if dtype == 'float16':
fc1 = mx.sym.Cast(data=fc1, dtype=np.float32)
return mx.sym.SoftmaxOutput(data=fc1, name='softmax')
def get_symbol(num_classes, num_layers, image_shape, num_group=32, conv_workspace=256, dtype='float32', **kwargs):
"""
Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
Original author Wei Wu
"""
image_shape = [int(l) for l in image_shape.split(',')]
(nchannel, height, width) = image_shape
if height <= 32:
num_stages = 3
if (num_layers-2) % 9 == 0 and num_layers >= 164:
per_unit = [(num_layers-2)//9]
filter_list = [16, 64, 128, 256]
bottle_neck = True
elif (num_layers-2) % 6 == 0 and num_layers < 164:
per_unit = [(num_layers-2)//6]
filter_list = [16, 16, 32, 64]
bottle_neck = False
else:
raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
units = per_unit * num_stages
else:
if num_layers >= 50:
filter_list = [64, 256, 512, 1024, 2048]
bottle_neck = True
else:
filter_list = [64, 64, 128, 256, 512]
bottle_neck = False
num_stages = 4
if num_layers == 18:
units = [2, 2, 2, 2]
elif num_layers == 34:
units = [3, 4, 6, 3]
elif num_layers == 50:
units = [3, 4, 6, 3]
elif num_layers == 101:
units = [3, 4, 23, 3]
elif num_layers == 152:
units = [3, 8, 36, 3]
elif num_layers == 200:
units = [3, 24, 36, 3]
elif num_layers == 269:
units = [3, 30, 48, 8]
else:
raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
return resnext(units = units,
num_stages = num_stages,
filter_list = filter_list,
num_classes = num_classes,
num_group = num_group,
image_shape = image_shape,
bottle_neck = bottle_neck,
workspace = conv_workspace,
dtype = dtype)
| apache-2.0 |
nick-klingaman/ASoP | ASoP-Coherence/asop_coherence_example.py | 1 | 8365 | import asop_coherence as asop
import numpy as np
"""
Example use of ASoP Coherence package to compute
and plot diagnostics of the spatial and temporal
coherence of precipitation in a dataset.
This example uses TRMM 3B42v7A and CMORPH v1.0 data
for October 2009 - January 2010, as shown in
Klingaman et al. (2017, GMD, doi:10.5194/gmd-10-57-2017)
Written by Nicholas Klingaman
nicholas.klingaman@ncas.ac.uk
(C) The author 2017
"""
def get_dictionary(key):
"""
The Coherence package relies on a dataset dictionary,
for which currently the user must specify most values.
This function shows how to build a dictionary. The dictionary to
be returned to the main program is selected through the use of
a "key", here either "TRMM" or "CMORPH"
Arguments:
* key:
A string used to select the correct dataset
Returns:
* asop_dict:
A dictionary containing required and optional
parameters for the ASoP Coherence package.
Required dictionary keys:
infile - Path to input file (must be readable by Iris)
name - Name for model in plot files (no spaces)
legend_name - Name for model in legends and titles on plots (spaces allowed)
dt - Timestep of input data (in seconds)
dx - Longitudinal grid spacing at equator (in km)
dy - Latitudinal grid spacing (in km)
constraint - standard_name of data to read from netCDF file (e.g., precipitation flux)
scale_factor - Multiplier necessary to convert precipitation to units of mm/day
region - Region of data to read [minlat, maxlat, minlon, maxlon]
box_size - Length of sub-regions (square boxes) to consider for correlation analysis
as a function of physical distance
(in km, recommended value is > 6.5*dx).
color - Name of color to use on line graphs (must be recognised by matplotlib).
region_size - Length of sub-regions (square boxes) to consider for correlation analysis
as a function of model gridpoints (including lag correlations, see below)
(in units of native gridpoints, odd integers strongly recommended).
lag_length - Maximum lag to consider for correlation analysis as a function of model
gridpoints, for constructing distance vs. lag correlation diagrams.
Correlations for lags from 0 (coincidence) until lag_length will be computed.
    autocorr_length - The maximum lag (in seconds) for which to compute autocorrelations.
Optional dictionary keys, which are useful mainly if analysing the same
dataset on more than one grid / temporal sampling interval:
grid_type - A string describing the grid, used in output filenames
(recommend no spaces).
time_type - A string describing the temporal sampling, used in output filenames
(recommend no spaces).
grid_desc - A string describing the grid, used in plot titles (can contain spaces).
time_desc - A string describing the temporal sampling, used in plot titles
(can contain spaces).
"""
asop_dict = {}
if key == 'TRMM':
asop_dict['infile'] = 'TRMM_3B42V7A.mjodiab_period_3hrmeans.precip.nc'
asop_dict['name'] = 'TRMM_3B42v7A'
asop_dict['dt'] = 10800
asop_dict['dx'] = 27
asop_dict['dy'] = 27
asop_dict['constraint'] = 'precipitation'
asop_dict['scale_factor'] = 8.0
asop_dict['legend_name'] = 'TRMM 3B42v7A'
asop_dict['region'] = [-10,10,60,90]
asop_dict['box_size'] = 1680
asop_dict['color'] = 'red'
asop_dict['region_size'] = 7
asop_dict['lag_length'] = 6
asop_dict['grid_type'] = 'native'
asop_dict['time_type'] = '3hr'
asop_dict['grid_desc'] = 'native'
asop_dict['time_desc'] = '3-hourly'
asop_dict['autocorr_length'] = 60*60*24
elif key == 'CMORPH':
asop_dict['infile'] = 'CMORPH_V1.0.mjodiab_period_3hrmeans.precip.nc'
asop_dict['name'] = 'CMORPH_V1.0'
asop_dict['dt'] = 10800
asop_dict['dx'] = 27
asop_dict['dy'] = 27
asop_dict['constraint'] = 'precip'
asop_dict['scale_factor'] = 8.0
asop_dict['legend_name'] = 'CMORPH 1.0'
asop_dict['region'] = [-10,10,60,90]
asop_dict['box_size'] = 1680
asop_dict['color'] = 'blue'
asop_dict['region_size'] = 7
asop_dict['lag_length'] = 6
asop_dict['grid_type'] = 'native'
asop_dict['time_type'] = '3hr'
asop_dict['grid_desc'] = 'native'
asop_dict['time_desc'] = '3-hourly'
asop_dict['autocorr_length'] = 60*60*24
return(asop_dict)
if __name__ == '__main__':
datasets = ('TRMM','CMORPH')
n_datasets = len(datasets)
# Allocate memory for multi-model fields
max_box_distance,max_timesteps,max_boxes = asop.parameters()
all_distance_correlations = np.zeros((n_datasets,max_box_distance))
all_distance_ranges = np.zeros((n_datasets,3,max_box_distance))
all_distance_max = np.zeros((n_datasets),dtype=np.int)
all_time_correlations = np.zeros((n_datasets,max_timesteps))
all_time_max = np.zeros((n_datasets),dtype=np.int)
all_dt = np.zeros((n_datasets),dtype=np.int)
all_colors = []
all_legend_names = []
    for i in range(n_datasets):
        print('--> '+datasets[i])
asop_dict = get_dictionary(datasets[i])
# Read precipitation data
precip = asop.read_precip(asop_dict)
# Define edges of bins for 1D and 2D histograms
# Note that on plots, the first and last edges will be
# replaced by < and > signs, respectively.
bins=[0,1,2,4,6,9,12,16,20,25,30,40,60,90,130,180,2e20]
bins=np.asarray(bins)
# Compute 1D and 2D histograms
oned_hist, twod_hist = asop.compute_histogram(precip,bins)
# Plot 1D and 2D histograms (e.g., Fig. 2a in Klingaman et al. 2017).
asop.plot_histogram(oned_hist,twod_hist,asop_dict,bins)
# Compute correlations as a function of native gridpoints, by dividing
# analysis region into sub-regions (boxes of length region_size). Also
# computes lag correlations to a maximum lag of lag_length.
corr_map,lag_vs_distance,autocorr,npts_map,npts = asop.compute_equalgrid_corr(precip,asop_dict)
# Plot correlations as a function of native gridpoints and time lag
# (e.g., Figs. 2c and 2e in Klingaman et al. 2017).
asop.plot_equalgrid_corr(corr_map,lag_vs_distance,autocorr,npts,asop_dict)
# Compute correlations as a function of physical distance, by dividing
# analysis region into sub-regions (boxes of length box_size).
all_distance_correlations[i,:],all_distance_ranges[i,:,:],all_distance_max[i] = asop.compute_equalarea_corr(precip,asop_dict)
# Compute lagged autocorrelations over all points
all_time_correlations[i,:],all_time_max[i] = asop.compute_autocorr(precip,asop_dict)
# Compute spatial and temporal coherence metrics, based on quartiles (4 divisions)
space_inter, time_inter = asop.compute_spacetime_summary(precip,4)
# Save dataset timestep information
all_dt[i] = asop_dict['dt']
# Save color and legend information
all_colors.append(asop_dict['color'])
all_legend_names.append(asop_dict['legend_name'])
# Plot correlations as a function of physical distance for all datasets
asop.plot_equalarea_corr(all_distance_correlations,all_distance_ranges,all_distance_max,colors=all_colors,legend_names=all_legend_names,set_desc='satobs')
# Plot correlations as a function of physical time for all datasets
asop.plot_autocorr(all_time_correlations,all_time_max,dt=all_dt,colors=all_colors,legend_names=all_legend_names,set_desc='satobs')
| apache-2.0 |
zhangfengesri/MLPython | script/exercise_bmlsp_postclustering.py | 1 | 1790 | """
Exercise 2 - Posts <BMLSP>
Clustering Machine Learning Example
"""
# Author: Feng Zhang <fzhang@esri.com>
# License: Simplified BSD
import os
import sys
import numpy as np
import scipy as sp
import nltk.stem
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
import sklearn.datasets
# Define Stemmed Class for TfidfVectorizer
english_stemmer = nltk.stem.SnowballStemmer('english')
class StemmedTfidfVectorizer(TfidfVectorizer):
def build_analyzer(self):
analyzer = super(TfidfVectorizer, self).build_analyzer()
return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))
# Read the data from file
data_folder = sys.argv[1]
data_train = sklearn.datasets.load_mlcomp("20news-18828", "train", mlcomp_root=data_folder, categories=['comp.graphics', 'comp.sys.ibm.pc.hardware', 'sci.space'])
data_test = sklearn.datasets.load_mlcomp("20news-18828", "test", mlcomp_root=data_folder, categories=['comp.graphics', 'comp.sys.ibm.pc.hardware', 'sci.space'])
print(len(data_train.filenames))
# Clean and load the data
vectorizer = StemmedTfidfVectorizer(min_df=10, max_df=0.5, stop_words='english', decode_error='ignore')
data_train_vector = vectorizer.fit_transform(data_train.data)
data_test_vector = vectorizer.transform(data_test.data)
# Print and explore the data
num_samples, num_features = data_train_vector.shape
print("# of Samples %d, # of Features %d" %(num_samples, num_features))
# Fit the KMeans clustering model
clf_KMeans = KMeans(n_clusters=50, init="random", n_init=1, verbose=1).fit(data_train_vector)
# Inspect the fitted model
print(clf_KMeans.labels_)
print(clf_KMeans.labels_.shape)
# predict using the model
y_predicted = clf_KMeans.predict(data_test_vector)
# validate the model
# print the result
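# The two steps above are left empty in the original exercise; a minimal
# sketch (assuming the true newsgroup labels are available in
# data_test.target, as they are for load_mlcomp) could compare the cluster
# assignments against them:
from sklearn.metrics import adjusted_rand_score, homogeneity_score
print("Adjusted Rand index: %.3f" % adjusted_rand_score(data_test.target, y_predicted))
print("Homogeneity: %.3f" % homogeneity_score(data_test.target, y_predicted))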
| bsd-2-clause |
pystruct/pystruct | benchmarks/random_tree_crf.py | 1 | 1467 | import numpy as np
from scipy import sparse
try:
from sklearn.model_selection import train_test_split
except ImportError:
from sklearn.cross_validation import train_test_split
from scipy.sparse.csgraph import minimum_spanning_tree
from pystruct.learners import SubgradientSSVM
from pystruct.models import GraphCRF
def make_random_trees(n_samples=50, n_nodes=100, n_states=7, n_features=10):
crf = GraphCRF(inference_method='max-product', n_states=n_states,
n_features=n_features)
weights = np.random.randn(crf.size_joint_feature)
X, y = [], []
for i in range(n_samples):
distances = np.random.randn(n_nodes, n_nodes)
features = np.random.randn(n_nodes, n_features)
tree = minimum_spanning_tree(sparse.csr_matrix(distances))
edges = np.c_[tree.nonzero()]
X.append((features, edges))
y.append(crf.inference(X[-1], weights))
return X, y, weights
X, y, weights = make_random_trees(n_nodes=1000)
X_train, X_test, y_train, y_test = train_test_split(X, y)
#tree_model = MultiLabelClf(edges=tree, inference_method=('ogm', {'alg': 'dyn'}))
tree_model = GraphCRF(inference_method='max-product')
tree_ssvm = SubgradientSSVM(tree_model, max_iter=4, C=1, verbose=10)
print("fitting tree model...")
tree_ssvm.fit(X_train, y_train)
print("Training loss tree model: %f" % tree_ssvm.score(X_train, y_train))
print("Test loss tree model: %f" % tree_ssvm.score(X_test, y_test))
| bsd-2-clause |
QijunPan/ansible | lib/ansible/modules/cloud/smartos/vmadm.py | 10 | 24627 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Jasper Lievisse Adriaanse <j@jasper.la>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: vmadm
short_description: Manage SmartOS virtual machines and zones.
description:
- Manage SmartOS virtual machines through vmadm(1M).
version_added: "2.3"
author: Jasper Lievisse Adriaanse (@jasperla)
options:
archive_on_delete:
required: false
description:
- When enabled, the zone dataset will be mounted on C(/zones/archive)
upon removal.
autoboot:
required: false
description:
- Whether or not a VM is booted when the system is rebooted.
brand:
required: true
choices: [ joyent, joyent-minimal, kvm, lx ]
default: joyent
description:
- Type of virtual machine.
boot:
required: false
description:
- Set the boot order for KVM VMs.
cpu_cap:
required: false
description:
- Sets a limit on the amount of CPU time that can be used by a VM.
Use C(0) for no cap.
cpu_shares:
required: false
description:
- Sets a limit on the number of fair share scheduler (FSS) CPU shares for
a VM. This limit is relative to all other VMs on the system.
cpu_type:
required: false
choices: [ qemu64, host ]
default: qemu64
description:
- Control the type of virtual CPU exposed to KVM VMs.
customer_metadata:
required: false
description:
- Metadata to be set and associated with this VM, this contain customer
modifiable keys.
delegate_dataset:
required: false
description:
- Whether to delegate a ZFS dataset to an OS VM.
disk_driver:
required: false
description:
- Default value for a virtual disk model for KVM guests.
disks:
required: false
description:
- A list of disks to add, valid properties are documented in vmadm(1M).
dns_domain:
required: false
description:
- Domain value for C(/etc/hosts).
filesystems:
required: false
description:
- Mount additional filesystems into an OS VM.
firewall_enabled:
required: false
description:
- Enables the firewall, allowing fwadm(1M) rules to be applied.
force:
required: false
description:
- Force a particular action (i.e. stop or delete a VM).
fs_allowed:
required: false
description:
- Comma separated list of filesystem types this zone is allowed to mount.
hostname:
required: false
description:
- Zone/VM hostname.
image_uuid:
required: false
description:
- Image UUID.
indestructible_delegated:
required: false
description:
- Adds an C(@indestructible) snapshot to delegated datasets.
indestructible_zoneroot:
required: false
description:
- Adds an C(@indestructible) snapshot to zoneroot.
internal_metadata:
required: false
description:
- Metadata to be set and associated with this VM, this contains operator
generated keys.
internal_metadata_namespace:
required: false
description:
- List of namespaces to be set as I(internal_metadata-only); these namespaces
will come from I(internal_metadata) rather than I(customer_metadata).
kernel_version:
required: false
description:
- Kernel version to emulate for LX VMs.
limit_priv:
required: false
description:
- Set (comma separated) list of privileges the zone is allowed to use.
maintain_resolvers:
required: false
description:
- Resolvers in C(/etc/resolv.conf) will be updated when updating
the I(resolvers) property.
max_locked_memory:
required: false
description:
- Total amount of memory (in MiBs) on the host that can be locked by this VM.
max_lwps:
required: false
description:
- Maximum number of lightweight processes this VM is allowed to have running.
max_physical_memory:
required: false
description:
- Maximum amount of memory (in MiBs) on the host that the VM is allowed to use.
max_swap:
required: false
description:
- Maximum amount of virtual memory (in MiBs) the VM is allowed to use.
mdata_exec_timeout:
required: false
description:
- Timeout in seconds (or 0 to disable) for the C(svc:/smartdc/mdata:execute) service
that runs user-scripts in the zone.
name:
required: false
aliases: [ alias ]
description:
- Name of the VM. vmadm(1M) uses this as an optional name.
nic_driver:
required: false
description:
- Default value for a virtual NIC model for KVM guests.
nics:
required: false
description:
- A list of nics to add, valid properties are documented in vmadm(1M).
nowait:
required: false
description:
- Consider the provisioning complete when the VM first starts, rather than
when the VM has rebooted.
qemu_opts:
required: false
description:
- Additional qemu arguments for KVM guests. This overwrites the default arguments
provided by vmadm(1M) and should only be used for debugging.
qemu_extra_opts:
required: false
description:
- Additional qemu cmdline arguments for KVM guests.
quota:
required: false
description:
- Quota on zone filesystems (in MiBs).
ram:
required: false
description:
- Amount of virtual RAM for a KVM guest (in MiBs).
resolvers:
required: false
description:
- List of resolvers to be put into C(/etc/resolv.conf).
routes:
required: false
description:
- Dictionary that maps destinations to gateways, these will be set as static
routes in the VM.
spice_opts:
required: false
description:
- Addition options for SPICE-enabled KVM VMs.
spice_password:
required: false
description:
- Password required to connect to SPICE. By default no password is set.
Please note this can be read from the Global Zone.
state:
required: true
choices: [ present, absent, stopped, restarted ]
description:
- States for the VM to be in. Please note that C(present), C(stopped) and C(restarted)
operate on a VM that is currently provisioned. C(present) means that the VM will be
created if it was absent, and that it will be in a running state. C(absent) will
shutdown the zone before removing it.
C(stopped) means the zone will be created if it doesn't exist already, before shutting
it down.
tmpfs:
required: false
description:
- Amount of memory (in MiBs) that will be available in the VM for the C(/tmp) filesystem.
uuid:
required: false
description:
- UUID of the VM. Can either be a full UUID or C(*) for all VMs.
vcpus:
required: false
description:
- Number of virtual CPUs for a KVM guest.
vga:
required: false
description:
- Specify VGA emulation used by KVM VMs.
virtio_txburst:
required: false
description:
- Number of packets that can be sent in a single flush of the tx queue of virtio NICs.
virtio_txtimer:
required: false
description:
- Timeout (in nanoseconds) for the TX timer of virtio NICs.
vnc_password:
required: false
description:
- Password required to connect to VNC. By default no password is set.
Please note this can be read from the Global Zone.
vnc_port:
required: false
description:
- TCP port to listen of the VNC server. Or set C(0) for random,
or C(-1) to disable.
zfs_data_compression:
required: false
description:
- Specifies compression algorithm used for this VMs data dataset. This option
only has effect on delegated datasets.
zfs_data_recsize:
required: false
description:
- Suggested block size (power of 2) for files in the delegated dataset's filesystem.
zfs_filesystem_limit:
required: false
description:
- Maximum number of filesystems the VM can have.
zfs_io_priority:
required: false
description:
- IO throttle priority value relative to other VMs.
zfs_root_compression:
required: false
description:
- Specifies compression algorithm used for this VMs root dataset. This option
only has effect on the zoneroot dataset.
zfs_root_recsize:
required: false
description:
- Suggested block size (power of 2) for files in the zoneroot dataset's filesystem.
zfs_snapshot_limit:
required: false
description:
- Number of snapshots the VM can have.
zpool:
required: false
description:
- ZFS pool the VM's zone dataset will be created in.
requirements:
- python >= 2.6
'''
EXAMPLES = '''
- name: create SmartOS zone
vmadm:
brand: joyent
state: present
alias: fw_zone
image_uuid: 95f265b8-96b2-11e6-9597-972f3af4b6d5
firewall_enabled: yes
indestructible_zoneroot: yes
nics:
- nic_tag: admin
ip: dhcp
primary: true
internal_metadata:
root_pw: 'secret'
quota: 1
- name: Delete a zone
vmadm:
alias: test_zone
state: deleted
- name: Stop all zones
vmadm:
uuid: '*'
state: stopped
'''
RETURN = '''
uuid:
description: UUID of the managed VM.
returned: always
type: string
sample: 'b217ab0b-cf57-efd8-cd85-958d0b80be33'
alias:
description: Alias of the managed VM.
returned: When addressing a VM by alias.
type: string
sample: 'dns-zone'
state:
description: State of the target, after execution.
returned: success
type: string
sample: 'running'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils._text import to_native
import os
import re
import tempfile
import traceback
try:
import json
except ImportError:
import simplejson as json
# While vmadm(1M) supports a -E option to return any errors in JSON, the
# generated JSON does not play well with the JSON parsers of Python.
# The returned message contains '\n' as part of the stacktrace,
# which breaks the parsers.
def get_vm_prop(module, uuid, prop):
# Lookup a property for the given VM.
# Returns the property, or None if not found.
cmd = '{0} lookup -j -o {1} uuid={2}'.format(module.vmadm, prop, uuid)
(rc, stdout, stderr) = module.run_command(cmd)
if rc != 0:
module.fail_json(
msg='Could not perform lookup of {0} on {1}'.format(prop, uuid), exception=stderr)
try:
stdout_json = json.loads(stdout)
except:
e = get_exception()
module.fail_json(
            msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(uuid),
details=to_native(e))
if len(stdout_json) > 0 and prop in stdout_json[0]:
return stdout_json[0][prop]
else:
return None
def get_vm_uuid(module, alias):
# Lookup the uuid that goes with the given alias.
# Returns the uuid or '' if not found.
cmd = '{0} lookup -j -o uuid alias={1}'.format(module.vmadm, alias)
(rc, stdout, stderr) = module.run_command(cmd)
if rc != 0:
module.fail_json(
msg='Could not retrieve UUID of {0}'.format(alias), exception=stderr)
    # If no VM was found matching the given alias, we get back an empty array.
    # That is not an error condition as we might be explicitly checking its
    # absence.
if stdout.strip() == '[]':
return None
else:
try:
stdout_json = json.loads(stdout)
except:
e = get_exception()
module.fail_json(
msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(alias),
details=to_native(e))
if len(stdout_json) > 0 and 'uuid' in stdout_json[0]:
return stdout_json[0]['uuid']
def get_all_vm_uuids(module):
# Retrieve the UUIDs for all VMs.
cmd = '{0} lookup -j -o uuid'.format(module.vmadm)
(rc, stdout, stderr) = module.run_command(cmd)
if rc != 0:
module.fail_json(msg='Failed to get VMs list', exception=stderr)
try:
stdout_json = json.loads(stdout)
return [v['uuid'] for v in stdout_json]
except:
e = get_exception()
module.fail_json(msg='Could not retrieve VM UUIDs', details=to_native(e))
def new_vm(module, uuid, vm_state):
payload_file = create_payload(module, uuid)
(rc, stdout, stderr) = vmadm_create_vm(module, payload_file)
if rc != 0:
changed = False
module.fail_json(msg='Could not create VM', exception=stderr)
else:
changed = True
# 'vmadm create' returns all output to stderr...
match = re.match('Successfully created VM (.*)', stderr)
if match:
vm_uuid = match.groups()[0]
if not is_valid_uuid(vm_uuid):
module.fail_json(msg='Invalid UUID for VM {0}?'.format(vm_uuid))
else:
module.fail_json(msg='Could not retrieve UUID of newly created(?) VM')
# Now that the VM is created, ensure it is in the desired state (if not 'running')
if vm_state != 'running':
ret = set_vm_state(module, vm_uuid, vm_state)
if not ret:
module.fail_json(msg='Could not set VM {0} to state {1}'.format(vm_uuid, vm_state))
try:
os.unlink(payload_file)
except Exception as e:
# Since the payload may contain sensitive information, fail hard
# if we cannot remove the file so the operator knows about it.
module.fail_json(
msg='Could not remove temporary JSON payload file {0}'.format(payload_file),
exception=traceback.format_exc(e))
return changed, vm_uuid
def vmadm_create_vm(module, payload_file):
# Create a new VM using the provided payload.
cmd = '{0} create -f {1}'.format(module.vmadm, payload_file)
return module.run_command(cmd)
def set_vm_state(module, vm_uuid, vm_state):
p = module.params
# Check if the VM is already in the desired state.
state = get_vm_prop(module, vm_uuid, 'state')
if state and (state == vm_state):
return None
# Lookup table for the state to be in, and which command to use for that.
    # vm_state: [vmadm command, forceable?]
cmds = {
'stopped': ['stop', True],
'running': ['start', False],
'deleted': ['delete', True],
'rebooted': ['reboot', False]
}
if p['force'] and cmds[vm_state][1]:
force = '-F'
else:
force = ''
    cmd = '{0} {1} {2} {3}'.format(module.vmadm, cmds[vm_state][0], force, vm_uuid)
(rc, stdout, stderr) = module.run_command(cmd)
match = re.match('^Successfully.*', stderr)
if match:
return True
else:
return False
def create_payload(module, uuid):
# Create the JSON payload (vmdef) and return the filename.
p = module.params
# Filter out the few options that are not valid VM properties.
module_options = ['debug', 'force', 'state']
vmattrs = filter(lambda prop: prop not in module_options, p)
vmdef = {}
for attr in vmattrs:
if p[attr]:
vmdef[attr] = p[attr]
try:
vmdef_json = json.dumps(vmdef)
except Exception as e:
module.fail_json(
msg='Could not create valid JSON payload', exception=traceback.format_exc(e))
    # Create the temporary file that contains our payload, and set tight
    # permissions on it, for it may contain sensitive information.
try:
# XXX: When there's a way to get the current ansible temporary directory
# drop the mkstemp call and rely on ANSIBLE_KEEP_REMOTE_FILES to retain
# the payload (thus removing the `save_payload` option).
fname = tempfile.mkstemp()[1]
fh = open(fname, 'w')
os.chmod(fname, 0o400)
fh.write(vmdef_json)
fh.close()
except Exception as e:
module.fail_json(
msg='Could not save JSON payload', exception=traceback.format_exc(e))
return fname
def vm_state_transition(module, uuid, vm_state):
ret = set_vm_state(module, uuid, vm_state)
# Whether the VM changed state.
if ret is None:
return False
elif ret:
return True
else:
module.fail_json(msg='Failed to set VM {0} to state {1}'.format(uuid, vm_state))
def is_valid_uuid(uuid):
if re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', uuid, re.IGNORECASE):
return True
else:
return False
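# For example, the UUID format shown in the RETURN documentation passes:
#
#     is_valid_uuid('b217ab0b-cf57-efd8-cd85-958d0b80be33')  # True
#     is_valid_uuid('not-a-uuid')                            # False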
def validate_uuids(module):
# Perform basic UUID validation.
failed = []
for u in [['uuid', module.params['uuid']],
['image_uuid', module.params['image_uuid']]]:
if u[1] and u[1] != '*':
if not is_valid_uuid(u[1]):
failed.append(u[0])
if len(failed) > 0:
module.fail_json(msg='No valid UUID(s) found for: {0}'.format(", ".join(failed)))
def manage_all_vms(module, vm_state):
# Handle operations for all VMs, which can by definition only
# be state transitions.
state = module.params['state']
if state == 'created':
module.fail_json(msg='State "created" is only valid for tasks with a single VM')
# If any of the VMs has a change, the task as a whole has a change.
any_changed = False
# First get all VM uuids and for each check their state, and adjust it if needed.
for uuid in get_all_vm_uuids(module):
current_vm_state = get_vm_prop(module, uuid, 'state')
if not current_vm_state and vm_state == 'deleted':
any_changed = False
else:
if module.check_mode:
if (not current_vm_state) or (get_vm_prop(module, uuid, 'state') != state):
any_changed = True
else:
any_changed = (vm_state_transition(module, uuid, vm_state) | any_changed)
return any_changed
def main():
# In order to reduce the clutter and boilerplate for trivial options,
# abstract the vmadm properties and build the dict of arguments later.
# Dict of all options that are simple to define based on their type.
# They're not required and have a default of None.
properties = {
'str': [
'boot', 'disk_driver', 'dns_domain', 'fs_allowed', 'hostname',
'image_uuid', 'internal_metadata_namespace', 'kernel_version',
'limit_priv', 'nic_driver', 'qemu_opts', 'qemu_extra_opts',
'spice_opts', 'uuid', 'vga', 'zfs_data_compression',
'zfs_root_compression', 'zpool'
],
'bool': [
'archive_on_delete', 'autoboot', 'debug', 'delegate_dataset',
'firewall_enabled', 'force', 'indestructible_delegated',
'indestructible_zoneroot', 'maintain_resolvers', 'nowait'
],
'int': [
'cpu_cap', 'cpu_shares', 'max_locked_memory', 'max_lwps',
'max_physical_memory', 'max_swap', 'mdata_exec_timeout',
'quota', 'ram', 'tmpfs', 'vcpus', 'virtio_txburst',
'virtio_txtimer', 'vnc_port', 'zfs_data_recsize',
'zfs_filesystem_limit', 'zfs_io_priority', 'zfs_root_recsize',
'zfs_snapshot_limit'
],
'dict': ['customer_metadata', 'internal_metadata', 'routes'],
'list': ['disks', 'nics', 'resolvers', 'filesystems']
}
# Start with the options that are not as trivial as those above.
options = dict(
state=dict(
default='running',
type='str',
choices=['present', 'running', 'absent', 'deleted', 'stopped', 'created', 'restarted', 'rebooted']
),
name=dict(
default=None, type='str',
aliases=['alias']
),
brand=dict(
default='joyent',
type='str',
choices=['joyent', 'joyent-minimal', 'kvm', 'lx']
),
cpu_type=dict(
default='qemu64',
type='str',
choices=['host','qemu64']
),
# Regular strings, however these require additional options.
spice_password=dict(type='str', no_log=True),
vnc_password=dict(type='str', no_log=True),
)
# Add our 'simple' options to options dict.
for type in properties:
for p in properties[type]:
option = dict(default=None, type=type)
options[p] = option
module = AnsibleModule(
argument_spec=options,
supports_check_mode=True,
required_one_of=[['name', 'uuid']]
)
module.vmadm = module.get_bin_path('vmadm', required=True)
p = module.params
uuid = p['uuid']
state = p['state']
    # Translate the state parameter into something we can use later on.
if state in ['present', 'running']:
vm_state = 'running'
elif state in ['stopped', 'created']:
vm_state = 'stopped'
elif state in ['absent', 'deleted']:
vm_state = 'deleted'
elif state in ['restarted', 'rebooted']:
vm_state = 'rebooted'
result = {'state': state}
    # While it's possible to refer to a given VM by its `alias`, it's easier
    # to operate on VMs by their UUID. So if we're not given a `uuid`, look
    # it up.
if not uuid:
uuid = get_vm_uuid(module, p['name'])
# Bit of a chicken and egg problem here for VMs with state == deleted.
# If they're going to be removed in this play, we have to lookup the
# uuid. If they're already deleted there's nothing to looup.
# So if state == deleted and get_vm_uuid() returned '', the VM is already
# deleted and there's nothing else to do.
if uuid is None and vm_state == 'deleted':
result['name'] = p['name']
module.exit_json(**result)
validate_uuids(module)
if p['name']:
result['name'] = p['name']
result['uuid'] = uuid
if uuid == '*':
result['changed'] = manage_all_vms(module, vm_state)
module.exit_json(**result)
# The general flow is as follows:
    # - first the current state of the VM is obtained by its UUID.
# - If the state was not found and the desired state is 'deleted', return.
# - If the state was not found, it means the VM has to be created.
# Subsequently the VM will be set to the desired state (i.e. stopped)
    # - Otherwise, it means the VM exists already and we operate on its
    #   state (i.e. reboot it.)
#
# In the future it should be possible to query the VM for a particular
# property as a valid state (i.e. queried) so the result can be
# registered.
# Also, VMs should be able to get their properties updated.
# Managing VM snapshots should be part of a standalone module.
# First obtain the VM state to determine what needs to be done with it.
current_vm_state = get_vm_prop(module, uuid, 'state')
# First handle the case where the VM should be deleted and is not present.
if not current_vm_state and vm_state == 'deleted':
result['changed'] = False
elif module.check_mode:
# Shortcut for check mode, if there is no VM yet, it will need to be created.
# Or, if the VM is not in the desired state yet, it needs to transition.
if (not current_vm_state) or (get_vm_prop(module, uuid, 'state') != state):
result['changed'] = True
else:
result['changed'] = False
module.exit_json(**result)
# No VM was found that matched the given ID (alias or uuid), so we create it.
elif not current_vm_state:
result['changed'], result['uuid'] = new_vm(module, uuid, vm_state)
else:
# VM was found, operate on its state directly.
result['changed'] = vm_state_transition(module, uuid, vm_state)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
letconex/MMT | src/decoder-neural/src/main/python/nmmt/torch_utils.py | 1 | 1144 | import torch
_torch_gpus = None
def torch_setup(gpus=None, random_seed=None):
global _torch_gpus
if torch.cuda.is_available():
if gpus is None:
gpus = range(torch.cuda.device_count()) if torch.cuda.is_available() else None
else:
            # drop GPU indexes that are invalid: negative, or not smaller
            # than the number of available GPUs
            gpus = [x for x in gpus if 0 <= x < torch.cuda.device_count()]
if len(gpus) == 0:
gpus = None
else:
gpus = None
if random_seed is not None:
torch.manual_seed(random_seed)
if torch.cuda.is_available():
torch.cuda.random.manual_seed_all(random_seed)
if gpus is not None and len(gpus) > 0:
torch.cuda.set_device(gpus[0])
_torch_gpus = gpus
def torch_get_gpus():
global _torch_gpus
return _torch_gpus
def torch_is_using_cuda():
global _torch_gpus
return _torch_gpus is not None and len(_torch_gpus) > 0
def torch_is_multi_gpu():
global _torch_gpus
return _torch_gpus is not None and len(_torch_gpus) > 1
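# Minimal usage sketch (the GPU indexes and seed are hypothetical):
#
#     torch_setup(gpus=[0, 1], random_seed=3435)
#     if torch_is_using_cuda():
#         print('running on GPUs', torch_get_gpus())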
| apache-2.0 |
gsig/srnn | srnn-pytorch/srnn.py | 1 | 2966 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def tensorchoice(n, scores):
p = scores.detach().numpy()
p = p / p.sum()
return np.random.choice(n, p=p)
def tensormax(n, scores):
p = scores.detach().numpy()
return np.argmax(p)
def pick_probabilities(k, numberspicked, n, current):
# probabilities of picking each of the future elements
# if we are picking k elements sequentially without replacement
prob = torch.zeros(n)
no = 1.
remaining_picks = k - numberspicked
for i in range(current, n - remaining_picks + 1):
s = remaining_picks / float(n - i)
prob[i] = s * no
no = no * (1 - s)
return prob
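# Worked example: pick_probabilities(2, 0, 4, 0) yields approximately
# tensor([0.5000, 0.3333, 0.1667, 0.0000]). The entries sum to 1, and the
# last element gets probability 0 because choosing it as the first of two
# picks would leave no room for the second pick.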
class SRNN(nn.Module):
# This should follow similar semantics as nn.LSTM
def __init__(self, input_dim, hidden_dim, subset=10,
input_fun=lambda x: x,
output_fun=lambda x: x,
similarity=lambda x, y: (x * y).sum(2).sum(1),
choice=tensormax):
super(SRNN, self).__init__()
self.hidden_dim = hidden_dim
self.lstm = nn.LSTM(input_dim, hidden_dim)
self.input_fun = input_fun
self.output_fun = output_fun
self.similarity = similarity
self.choice = choice
self.hidden = self.init_hidden()
self.output = self.init_output()
self.subset = subset
def init_hidden(self):
# The axes semantics are (num_layers, minibatch_size, hidden_dim)
return (torch.zeros(1, 1, self.hidden_dim),
torch.zeros(1, 1, self.hidden_dim))
def scores(self, embeds, numberspicked, current):
n = len(embeds)
scores = self.similarity(self.output, embeds)
scores = F.softmax(scores, dim=0)
scores = scores * pick_probabilities(self.subset, numberspicked, n, current)
scores = scores / scores.sum()
return scores
def init_output(self):
return self.output_fun(torch.zeros(1, 1, self.hidden_dim))
def forward(self, input):
n = len(input)
embeds = self.input_fun(input)
# pick first node
scores = self.scores(embeds, 0, 0)
choice = self.choice(n, scores)
picks = [choice]
loss = -torch.log(scores[choice]) / n
outputs = []
for i, e in enumerate(embeds):
outputs.append(self.output)
if picks[-1] > i:
# skip elements until next node
continue
lstm_out, self.hidden = self.lstm(e.view(1, 1, -1), self.hidden)
self.output = self.output_fun(lstm_out.view(1, -1))
if len(picks) < self.subset:
# pick next node
scores = self.scores(embeds, len(picks), i + 1)
choice = self.choice(n, scores)
picks.append(choice)
loss -= torch.log(scores[choice]) / (n - i)
return loss, outputs, picks
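# Minimal usage sketch (assumed shapes: with the default identity
# input_fun/output_fun and the elementwise similarity above, the input is
# expected as (n, 1, input_dim) with input_dim == hidden_dim):
#
#     model = SRNN(input_dim=16, hidden_dim=16, subset=5)
#     x = torch.randn(30, 1, 16)
#     loss, outputs, picks = model(x)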
| gpl-3.0 |
Lawrence-Liu/scikit-learn | examples/neighbors/plot_regression.py | 346 | 1402 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
| bsd-3-clause |
quheng/scikit-learn | examples/neighbors/plot_regression.py | 346 | 1402 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
| bsd-3-clause |
edhuckle/statsmodels | statsmodels/robust/robust_linear_model.py | 27 | 25571 | """
Robust linear models with support for the M-estimators listed under
:ref:`norms <norms>`.
References
----------
PJ Huber. 'Robust Statistics' John Wiley and Sons, Inc., New York. 1981.
PJ Huber. 1973, 'The 1972 Wald Memorial Lectures: Robust Regression:
Asymptotics, Conjectures, and Monte Carlo.' The Annals of Statistics,
1.5, 799-821.
R Venables, B Ripley. 'Modern Applied Statistics in S' Springer, New York,
2002.
"""
from statsmodels.compat.python import string_types
import numpy as np
import scipy.stats as stats
from statsmodels.tools.decorators import (cache_readonly,
resettable_cache)
import statsmodels.regression.linear_model as lm
import statsmodels.robust.norms as norms
import statsmodels.robust.scale as scale
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
from statsmodels.compat.numpy import np_matrix_rank
__all__ = ['RLM']
def _check_convergence(criterion, iteration, tol, maxiter):
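    # Stop when every element of the criterion changed by at most `tol`
    # between iterations, or once `iteration` has reached `maxiter`.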
return not (np.any(np.fabs(criterion[iteration] -
criterion[iteration-1]) > tol) and iteration < maxiter)
class RLM(base.LikelihoodModel):
__doc__ = """
Robust Linear Models
Estimate a robust linear model via iteratively reweighted least squares
given a robust criterion estimator.
%(params)s
M : statsmodels.robust.norms.RobustNorm, optional
The robust criterion function for downweighting outliers.
The current options are LeastSquares, HuberT, RamsayE, AndrewWave,
TrimmedMean, Hampel, and TukeyBiweight. The default is HuberT().
See statsmodels.robust.norms for more information.
%(extra_params)s
Notes
-----
**Attributes**
df_model : float
The degrees of freedom of the model. The number of regressors p less
one for the intercept. Note that the reported model degrees
of freedom does not count the intercept as a regressor, though
the model is assumed to have an intercept.
df_resid : float
The residual degrees of freedom. The number of observations n
less the number of regressors p. Note that here p does include
the intercept, which uses one degree of freedom.
endog : array
See above. Note that endog is a reference to the data so that if
data is already an array and it is changed, then `endog` changes
as well.
exog : array
See above. Note that `exog` is a reference to the data so that if
data is already an array and it is changed, then `exog` changes
as well.
M : statsmodels.robust.norms.RobustNorm
See above. Robust estimator instance instantiated.
nobs : float
The number of observations n
pinv_wexog : array
The pseudoinverse of the design / exogenous data array. Note that
RLM has no whiten method, so this is just the pseudo inverse of the
design.
normalized_cov_params : array
The p x p normalized covariance of the design / exogenous data.
This is approximately equal to (X.T X)^(-1)
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.stackloss.load()
>>> data.exog = sm.add_constant(data.exog)
>>> rlm_model = sm.RLM(data.endog, data.exog,
M=sm.robust.norms.HuberT())
>>> rlm_results = rlm_model.fit()
>>> rlm_results.params
array([ 0.82938433, 0.92606597, -0.12784672, -41.02649835])
>>> rlm_results.bse
array([ 0.11100521, 0.30293016, 0.12864961, 9.79189854])
>>> rlm_results_HC2 = rlm_model.fit(cov="H2")
>>> rlm_results_HC2.params
array([ 0.82938433, 0.92606597, -0.12784672, -41.02649835])
>>> rlm_results_HC2.bse
array([ 0.11945975, 0.32235497, 0.11796313, 9.08950419])
>>>
>>> rlm_hamp_hub = sm.RLM(data.endog, data.exog,
M=sm.robust.norms.Hampel()).fit(
sm.robust.scale.HuberScale())
>>> rlm_hamp_hub.params
array([ 0.73175452, 1.25082038, -0.14794399, -40.27122257])
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc}
def __init__(self, endog, exog, M=norms.HuberT(), missing='none',
**kwargs):
self.M = M
super(base.LikelihoodModel, self).__init__(endog, exog,
missing=missing, **kwargs)
self._initialize()
#things to remove_data
self._data_attr.extend(['weights', 'pinv_wexog'])
def _initialize(self):
"""
Initializes the model for the IRLS fit.
Resets the history and number of iterations.
"""
self.pinv_wexog = np.linalg.pinv(self.exog)
self.normalized_cov_params = np.dot(self.pinv_wexog,
np.transpose(self.pinv_wexog))
self.df_resid = (np.float(self.exog.shape[0] -
np_matrix_rank(self.exog)))
self.df_model = np.float(np_matrix_rank(self.exog)-1)
self.nobs = float(self.endog.shape[0])
def score(self, params):
raise NotImplementedError
def information(self, params):
raise NotImplementedError
def predict(self, params, exog=None):
"""
Return linear predicted values from a design matrix.
Parameters
----------
params : array-like, optional after fit has been called
Parameters of a linear model
exog : array-like, optional.
Design / exogenous data. Model exog is used if None.
Returns
-------
An array of fitted values
Notes
-----
If the model has not yet been fit, params is not optional.
"""
#copied from linear_model
if exog is None:
exog = self.exog
return np.dot(exog, params)
def loglike(self, params):
raise NotImplementedError
def deviance(self, tmp_results):
"""
Returns the (unnormalized) log-likelihood from the M estimator.
"""
return self.M((self.endog - tmp_results.fittedvalues) /
tmp_results.scale).sum()
def _update_history(self, tmp_results, history, conv):
history['params'].append(tmp_results.params)
history['scale'].append(tmp_results.scale)
if conv == 'dev':
history['deviance'].append(self.deviance(tmp_results))
elif conv == 'sresid':
history['sresid'].append(tmp_results.resid/tmp_results.scale)
elif conv == 'weights':
history['weights'].append(tmp_results.model.weights)
return history
def _estimate_scale(self, resid):
"""
Estimates the scale based on the option provided to the fit method.
"""
if isinstance(self.scale_est, str):
if self.scale_est.lower() == 'mad':
return scale.mad(resid, center=0)
if self.scale_est.lower() == 'stand_mad':
return scale.mad(resid)
else:
raise ValueError("Option %s for scale_est not understood" %
self.scale_est)
elif isinstance(self.scale_est, scale.HuberScale):
return self.scale_est(self.df_resid, self.nobs, resid)
else:
return scale.scale_est(self, resid)**2
def fit(self, maxiter=50, tol=1e-8, scale_est='mad', init=None, cov='H1',
update_scale=True, conv='dev'):
"""
Fits the model using iteratively reweighted least squares.
The IRLS routine runs until the specified objective converges to `tol`
or `maxiter` has been reached.
Parameters
----------
conv : string
Indicates the convergence criteria.
Available options are "coefs" (the coefficients), "weights" (the
weights in the iteration), "sresid" (the standardized residuals),
and "dev" (the un-normalized log-likelihood for the M
estimator). The default is "dev".
cov : string, optional
'H1', 'H2', or 'H3'
Indicates how the covariance matrix is estimated. Default is 'H1'.
See rlm.RLMResults for more information.
init : string
Specifies method for the initial estimates of the parameters.
Default is None, which means that the least squares estimate
is used. Currently it is the only available choice.
maxiter : int
The maximum number of iterations to try. Default is 50.
scale_est : string or HuberScale()
'mad' or HuberScale()
Indicates the estimate to use for scaling the weights in the IRLS.
The default is 'mad' (median absolute deviation). The other option is
HuberScale() for Huber's proposal 2. Huber's proposal 2 has
optional keyword arguments d, tol, and maxiter for specifying the
tuning constant, the convergence tolerance, and the maximum number
of iterations. See statsmodels.robust.scale for more information.
tol : float
The convergence tolerance of the estimate. Default is 1e-8.
update_scale : Bool
If `update_scale` is False then the scale estimate for the
weights is held constant over the iteration. Otherwise, it
is updated for each fit in the iteration. Default is True.
Returns
-------
results : object
statsmodels.rlm.RLMresults
"""
if cov.upper() not in ["H1", "H2", "H3"]:
raise ValueError("Covariance matrix %s not understood" % cov)
else:
self.cov = cov.upper()
conv = conv.lower()
if conv not in ["weights", "coefs", "dev", "sresid"]:
raise ValueError("Convergence argument %s not understood" \
% conv)
self.scale_est = scale_est
if (isinstance(scale_est,
string_types) and scale_est.lower() == "stand_mad"):
from warnings import warn
warn("stand_mad is deprecated and will be removed in 0.7.0",
FutureWarning)
wls_results = lm.WLS(self.endog, self.exog).fit()
if not init:
self.scale = self._estimate_scale(wls_results.resid)
history = dict(params = [np.inf], scale = [])
if conv == 'coefs':
criterion = history['params']
elif conv == 'dev':
history.update(dict(deviance = [np.inf]))
criterion = history['deviance']
elif conv == 'sresid':
history.update(dict(sresid = [np.inf]))
criterion = history['sresid']
elif conv == 'weights':
history.update(dict(weights = [np.inf]))
criterion = history['weights']
# done one iteration so update
history = self._update_history(wls_results, history, conv)
iteration = 1
converged = 0
while not converged:
self.weights = self.M.weights(wls_results.resid/self.scale)
wls_results = lm.WLS(self.endog, self.exog,
weights=self.weights).fit()
if update_scale is True:
self.scale = self._estimate_scale(wls_results.resid)
history = self._update_history(wls_results, history, conv)
iteration += 1
converged = _check_convergence(criterion, iteration, tol, maxiter)
results = RLMResults(self, wls_results.params,
self.normalized_cov_params, self.scale)
history['iteration'] = iteration
results.fit_history = history
results.fit_options = dict(cov=cov.upper(), scale_est=scale_est,
norm=self.M.__class__.__name__, conv=conv)
#norm is not changed in fit, no old state
#doing the next causes exception
#self.cov = self.scale_est = None #reset for additional fits
#iteration and history could contain wrong state with repeated fit
return RLMResultsWrapper(results)
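# Illustrative usage sketch (added; mirrors the stackloss example from the
# class docstring -- results are not asserted here):
#   >>> import statsmodels.api as sm
#   >>> data = sm.datasets.stackloss.load()
#   >>> data.exog = sm.add_constant(data.exog)
#   >>> res = sm.RLM(data.endog, data.exog).fit(cov="H2", conv="weights")
#   >>> res.bse  # standard errors from the H2 robust covariance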
class RLMResults(base.LikelihoodModelResults):
"""
Class to contain RLM results
Returns
-------
**Attributes**
bcov_scaled : array
p x p scaled covariance matrix specified in the model fit method.
The default is H1. H1 is defined as
``k**2 * (1/df_resid*sum(M.psi(sresid)**2)*scale**2)/
((1/nobs*sum(M.psi_deriv(sresid)))**2) * (X.T X)^(-1)``
where ``k = 1 + (df_model +1)/nobs * var_psiprime/m**2``
where ``m = mean(M.psi_deriv(sresid))`` and
``var_psiprime = var(M.psi_deriv(sresid))``
H2 is defined as
``k * (1/df_resid) * sum(M.psi(sresid)**2) *scale**2/
((1/nobs)*sum(M.psi_deriv(sresid)))*W_inv``
H3 is defined as
``1/k * (1/df_resid * sum(M.psi(sresid)**2)*scale**2 *
(W_inv X.T X W_inv))``
where `k` is defined as above and
``W_inv = (M.psi_deriv(sresid) exog.T exog)^(-1)``
See the technical documentation for cleaner formulae.
bcov_unscaled : array
The usual p x p covariance matrix with scale set equal to 1. It
is then just equivalent to normalized_cov_params.
bse : array
An array of the standard errors of the parameters. The standard
errors are taken from the robust covariance matrix specified in the
argument to fit.
chisq : array
An array of the chi-squared values of the parameter estimates.
df_model
See RLM.df_model
df_resid
See RLM.df_resid
fit_history : dict
Contains information about the iterations. Its keys are `deviance`,
`params`, `iteration` and the convergence criteria specified in
`RLM.fit`, if different from `deviance` or `params`.
fit_options : dict
Contains the options given to fit.
fittedvalues : array
The linear predicted values. dot(exog, params)
model : statsmodels.rlm.RLM
A reference to the model instance
nobs : float
The number of observations n
normalized_cov_params : array
See RLM.normalized_cov_params
params : array
The coefficients of the fitted model
pinv_wexog : array
See RLM.pinv_wexog
pvalues : array
The p values associated with `tvalues`. Note that `tvalues` are assumed to be distributed
standard normal rather than Student's t.
resid : array
The residuals of the fitted model. endog - fittedvalues
scale : float
The type of scale is determined in the arguments to the fit method in
RLM. The reported scale is taken from the residuals of the weighted
least squares in the last IRLS iteration if update_scale is True. If
update_scale is False, then it is the scale given by the first OLS
fit before the IRLS iterations.
sresid : array
The scaled residuals.
tvalues : array
The "t-statistics" of params. These are defined as params/bse where bse are taken
from the robust covariance matrix specified in the argument to fit.
weights : array
The reported weights are determined by passing the scaled residuals
from the last weighted least squares fit in the IRLS algorithm.
See also
--------
statsmodels.model.LikelihoodModelResults
"""
def __init__(self, model, params, normalized_cov_params, scale):
super(RLMResults, self).__init__(model, params,
normalized_cov_params, scale)
self.model = model
self.df_model = model.df_model
self.df_resid = model.df_resid
self.nobs = model.nobs
self._cache = resettable_cache()
#for remove_data
self.data_in_cache = ['sresid']
self.cov_params_default = self.bcov_scaled
#TODO: "pvals" should come from chisq on bse?
@cache_readonly
def fittedvalues(self):
return np.dot(self.model.exog, self.params)
@cache_readonly
def resid(self):
return self.model.endog - self.fittedvalues # before bcov
@cache_readonly
def sresid(self):
return self.resid/self.scale
@cache_readonly
def bcov_unscaled(self):
return self.normalized_cov_params
@cache_readonly
def weights(self):
return self.model.weights
@cache_readonly
def bcov_scaled(self):
model = self.model
m = np.mean(model.M.psi_deriv(self.sresid))
var_psiprime = np.var(model.M.psi_deriv(self.sresid))
k = 1 + (self.df_model+1)/self.nobs * var_psiprime/m**2
if model.cov == "H1":
return k**2 * (1/self.df_resid*\
np.sum(model.M.psi(self.sresid)**2)*self.scale**2)\
/((1/self.nobs*np.sum(model.M.psi_deriv(self.sresid)))**2)\
*model.normalized_cov_params
else:
W = np.dot(model.M.psi_deriv(self.sresid)*model.exog.T,
model.exog)
W_inv = np.linalg.inv(W)
# [W_jk]^-1 = [SUM(psi_deriv(Sr_i)*x_ij*x_jk)]^-1
# where Sr are the standardized residuals
if model.cov == "H2":
# These are correct, based on Huber (1973) 8.13
return k*(1/self.df_resid)*np.sum(\
model.M.psi(self.sresid)**2)*self.scale**2\
/((1/self.nobs)*np.sum(\
model.M.psi_deriv(self.sresid)))*W_inv
elif model.cov == "H3":
return k**-1*1/self.df_resid*np.sum(\
model.M.psi(self.sresid)**2)*self.scale**2\
*np.dot(np.dot(W_inv, np.dot(model.exog.T,model.exog)),\
W_inv)
@cache_readonly
def pvalues(self):
return stats.norm.sf(np.abs(self.tvalues))*2
@cache_readonly
def bse(self):
return np.sqrt(np.diag(self.bcov_scaled))
@cache_readonly
def chisq(self):
return (self.params/self.bse)**2
def remove_data(self):
super(self.__class__, self).remove_data()
#self.model.history['sresid'] = None
#self.model.history['weights'] = None
remove_data.__doc__ = base.LikelihoodModelResults.remove_data.__doc__
def summary(self, yname=None, xname=None, title=0, alpha=.05,
return_fmt='text'):
"""
This is for testing the new summary setup
"""
from statsmodels.iolib.summary import (summary_top,
summary_params, summary_return)
## left = [(i, None) for i in (
## 'Dependent Variable:',
## 'Model type:',
## 'Method:',
## 'Date:',
## 'Time:',
## 'Number of Obs:',
## 'df resid',
## 'df model',
## )]
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['IRLS']),
('Norm:', [self.fit_options['norm']]),
('Scale Est.:', [self.fit_options['scale_est']]),
('Cov Type:', [self.fit_options['cov']]),
('Date:', None),
('Time:', None),
('No. Iterations:', ["%d" % self.fit_history['iteration']])
]
top_right = [('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None)
]
if title is not None:
title = "Robust Linear Model Regression Results"
#boiler plate
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right, #[],
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
#diagnostic table is not used yet
# smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
# yname=yname, xname=xname,
# title="")
#add warnings/notes, added to text format only
etext =[]
wstr = \
'''If the model instance has been used for another fit with different fit
parameters, then the fit options might not be the correct ones anymore.'''
etext.append(wstr)
if etext:
smry.add_extra_txt(etext)
return smry
def summary2(self, xname=None, yname=None, title=None, alpha=.05,
float_format="%.4f"):
"""Experimental summary function for regression results
Parameters
----------
xname : List of strings of length equal to the number of parameters
Names of the independent variables (optional)
yname : string
Name of the dependent variable (optional)
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format: string
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
# Summary
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
return smry
class RLMResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(RLMResultsWrapper, RLMResults)
if __name__=="__main__":
#NOTE: This is to be removed
#Delivery Time Data is taken from Montgomery and Peck
import statsmodels.api as sm
#delivery time(minutes)
endog = np.array([16.68, 11.50, 12.03, 14.88, 13.75, 18.11, 8.00, 17.83,
79.24, 21.50, 40.33, 21.00, 13.50, 19.75, 24.00, 29.00, 15.35, 19.00,
9.50, 35.10, 17.90, 52.32, 18.75, 19.83, 10.75])
#number of cases, distance (Feet)
exog = np.array([[7, 3, 3, 4, 6, 7, 2, 7, 30, 5, 16, 10, 4, 6, 9, 10, 6,
7, 3, 17, 10, 26, 9, 8, 4], [560, 220, 340, 80, 150, 330, 110, 210, 1460,
605, 688, 215, 255, 462, 448, 776, 200, 132, 36, 770, 140, 810, 450, 635,
150]])
exog = exog.T
exog = sm.add_constant(exog)
# model_ols = models.regression.OLS(endog, exog)
# results_ols = model_ols.fit()
# model_ramsaysE = RLM(endog, exog, M=norms.RamsayE())
# results_ramsaysE = model_ramsaysE.fit(update_scale=False)
# model_andrewWave = RLM(endog, exog, M=norms.AndrewWave())
# results_andrewWave = model_andrewWave.fit(update_scale=False)
# model_hampel = RLM(endog, exog, M=norms.Hampel(a=1.7,b=3.4,c=8.5)) # convergence problems with scale changed, not with 2,4,8 though?
# results_hampel = model_hampel.fit(update_scale=False)
#######################
### Stack Loss Data ###
#######################
from statsmodels.datasets.stackloss import load
data = load()
data.exog = sm.add_constant(data.exog)
#############
### Huber ###
#############
# m1_Huber = RLM(data.endog, data.exog, M=norms.HuberT())
# results_Huber1 = m1_Huber.fit()
# m2_Huber = RLM(data.endog, data.exog, M=norms.HuberT())
# results_Huber2 = m2_Huber.fit(cov="H2")
# m3_Huber = RLM(data.endog, data.exog, M=norms.HuberT())
# results_Huber3 = m3_Huber.fit(cov="H3")
##############
### Hampel ###
##############
# m1_Hampel = RLM(data.endog, data.exog, M=norms.Hampel())
# results_Hampel1 = m1_Hampel.fit()
# m2_Hampel = RLM(data.endog, data.exog, M=norms.Hampel())
# results_Hampel2 = m2_Hampel.fit(cov="H2")
# m3_Hampel = RLM(data.endog, data.exog, M=norms.Hampel())
# results_Hampel3 = m3_Hampel.fit(cov="H3")
################
### Bisquare ###
################
# m1_Bisquare = RLM(data.endog, data.exog, M=norms.TukeyBiweight())
# results_Bisquare1 = m1_Bisquare.fit()
# m2_Bisquare = RLM(data.endog, data.exog, M=norms.TukeyBiweight())
# results_Bisquare2 = m2_Bisquare.fit(cov="H2")
# m3_Bisquare = RLM(data.endog, data.exog, M=norms.TukeyBiweight())
# results_Bisquare3 = m3_Bisquare.fit(cov="H3")
##############################################
# Huber's Proposal 2 scaling #
##############################################
################
### Huber'sT ###
################
m1_Huber_H = RLM(data.endog, data.exog, M=norms.HuberT())
results_Huber1_H = m1_Huber_H.fit(scale_est=scale.HuberScale())
# m2_Huber_H
# m3_Huber_H
# m4 = RLM(data.endog, data.exog, M=norms.HuberT())
# results4 = m1.fit(scale_est="Huber")
# m5 = RLM(data.endog, data.exog, M=norms.Hampel())
# results5 = m2.fit(scale_est="Huber")
# m6 = RLM(data.endog, data.exog, M=norms.TukeyBiweight())
# results6 = m3.fit(scale_est="Huber")
# print """Least squares fit
#%s
#Huber Params, t = 2.
#%s
#Ramsay's E Params
#%s
#Andrew's Wave Params
#%s
#Hampel's 17A Function
#%s
#""" % (results_ols.params, results_huber.params, results_ramsaysE.params,
# results_andrewWave.params, results_hampel.params)
| bsd-3-clause |
maurofaccenda/ansible | lib/ansible/modules/cloud/smartos/imgadm.py | 68 | 10313 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, 2017 Jasper Lievisse Adriaanse <j@jasper.la>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: imgadm
short_description: Manage SmartOS images
description:
- Manage SmartOS virtual machine images through imgadm(1M)
version_added: "2.3"
author: Jasper Lievisse Adriaanse (@jasperla)
options:
force:
required: false
choices: [ yes, no ]
description:
- Force a given operation (where supported by imgadm(1M)).
pool:
required: false
default: zones
description:
- zpool to import to or delete images from.
source:
required: false
description:
- URI for the image source.
state:
required: true
choices: [ present, absent, deleted, imported, updated, vacuumed ]
description:
- State the object operated on should be in. C(imported) is an alias
for C(present) and C(deleted) for C(absent). When set to C(vacuumed)
and C(uuid) to C(*), it will remove all unused images.
type:
required: false
choices: [ imgapi, docker, dsapi ]
default: imgapi
description:
- Type for image sources.
uuid:
required: false
description:
- Image UUID. Can either be a full UUID or C(*) for all images.
requirements:
- python >= 2.6
'''
EXAMPLES = '''
- name: Import an image
imgadm:
uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
state: imported
- name: Delete an image
imgadm:
uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
state: deleted
- name: Update all images
imgadm:
uuid: '*'
state: updated
- name: Update a single image
imgadm:
uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
state: updated
- name: Add a source
imgadm:
source: 'https://datasets.project-fifo.net'
state: present
- name: Add a Docker source
imgadm:
source: 'https://docker.io'
type: docker
state: present
- name: Remove a source
imgadm:
source: 'https://docker.io'
state: absent
'''
RETURN = '''
source:
description: Source that is managed.
returned: When not managing an image.
type: string
sample: https://datasets.project-fifo.net
uuid:
description: UUID for an image operated on.
returned: When not managing an image source.
type: string
sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764
state:
description: State of the target, after execution.
returned: success
type: string
sample: 'present'
'''
from ansible.module_utils.basic import AnsibleModule
import re
# Shortcut for the imgadm(1M) command. While imgadm(1M) supports a
# -E option to return any errors in JSON, the generated JSON does not play well
# with the JSON parsers of Python. The returned message contains '\n' as part of
# the stacktrace, which breaks the parsers.
class Imgadm(object):
def __init__(self, module):
self.module = module
self.params = module.params
self.cmd = module.get_bin_path('imgadm', required=True)
self.changed = False
self.uuid = module.params['uuid']
# Since there are a number of (natural) aliases, prevent having to look
# them up every time we operate on `state`.
if self.params['state'] in ['present', 'imported', 'updated']:
self.present = True
else:
self.present = False
# Perform basic UUID validation upfront.
if self.uuid and self.uuid != '*':
if not re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', self.uuid, re.IGNORECASE):
module.fail_json(msg='Provided value for uuid option is not a valid UUID.')
# Helper method to massage stderr
def errmsg(self, stderr):
match = re.match(r'^imgadm .*?: error \(\w+\): (.*): .*', stderr)
if match:
return match.groups()[0]
else:
return 'Unexpected failure'
def update_images(self):
if self.uuid == '*':
cmd = '{0} update'.format(self.cmd)
else:
cmd = '{0} update {1}'.format(self.cmd, self.uuid)
(rc, stdout, stderr) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to update images: {0}'.format(self.errmsg(stderr)))
# There is no feedback from imgadm(1M) to determine if anything
# was actually changed. So treat this as an 'always-changes' operation.
# Note that 'imgadm -v' produces unparseable JSON...
self.changed = True
def manage_sources(self):
force = self.params['force']
source = self.params['source']
imgtype = self.params['type']
cmd = '{0} sources'.format(self.cmd)
if force:
cmd += ' -f'
if self.present:
cmd = '{0} -a {1} -t {2}'.format(cmd, source, imgtype)
(rc, stdout, stderr) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to add source: {0}'.format(self.errmsg(stderr)))
# Check the various responses.
# Note that trying to add a source with the wrong type is handled
# above as it results in a non-zero status.
regex = 'Already have "{0}" image source "{1}", no change'.format(imgtype, source)
if re.match(regex, stdout):
self.changed = False
regex = 'Added "%s" image source "%s"' % (imgtype, source)
if re.match(regex, stdout):
self.changed = True
else:
# Type is ignored by imgadm(1M) here
cmd += ' -d %s' % (source)
(rc, stdout, stderr) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to remove source: {0}'.format(self.errmsg(stderr)))
regex = 'Do not have image source "%s", no change' % (source)
if re.match(regex, stdout):
self.changed = False
regex = 'Deleted ".*" image source "%s"' % (source)
if re.match(regex, stdout):
self.changed = True
def manage_images(self):
pool = self.params['pool']
state = self.params['state']
if state == 'vacuumed':
# Unconditionally pass '--force', otherwise we're prompted with 'y/N'
cmd = '{0} vacuum -f'.format(self.cmd)
(rc, stdout, stderr) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to vacuum images: {0}'.format(self.errmsg(stderr)))
else:
if stdout == '':
self.changed = False
else:
self.changed = True
if self.present:
cmd = '{0} import -P {1} -q {2}'.format(self.cmd, pool, self.uuid)
(rc, stdout, stderr) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to import image: {0}'.format(self.errmsg(stderr)))
regex = r'Image {0} \(.*\) is already installed, skipping'.format(self.uuid)
if re.match(regex, stdout):
self.changed = False
regex = '.*ActiveImageNotFound.*'
if re.match(regex, stderr):
self.changed = False
regex = 'Imported image {0}.*'.format(self.uuid)
if re.match(regex, stdout.splitlines()[-1]):
self.changed = True
else:
cmd = '{0} delete -P {1} {2}'.format(self.cmd, pool, self.uuid)
(rc, stdout, stderr) = self.module.run_command(cmd)
regex = '.*ImageNotInstalled.*'
if re.match(regex, stderr):
# Even if the 'rc' was non-zero (3), we handled the situation
# in order to determine if there was a change.
self.changed = False
regex = 'Deleted image {0}'.format(self.uuid)
if re.match(regex, stdout):
self.changed = True
def main():
module = AnsibleModule(
argument_spec=dict(
force=dict(default=None, type='bool'),
pool=dict(default='zones'),
source=dict(default=None),
state=dict(default=None, required=True, choices=['present', 'absent', 'deleted', 'imported', 'updated', 'vacuumed']),
type=dict(default='imgapi', choices=['imgapi', 'docker', 'dsapi']),
uuid=dict(default=None)
),
# This module relies largely on imgadm(1M) to enforce idempotency, which does not
# provide a "noop" (or equivalent) mode to do a dry-run.
supports_check_mode=False,
)
imgadm = Imgadm(module)
uuid = module.params['uuid']
source = module.params['source']
state = module.params['state']
result = {'state': state}
# Either manage sources or images.
if source:
result['source'] = source
imgadm.manage_sources()
else:
result['uuid'] = uuid
if state == 'updated':
imgadm.update_images()
else:
# Make sure we operate on a single image for the following actions
if (uuid == '*') and (state != 'vacuumed'):
module.fail_json(msg='Can only specify uuid as "*" when updating or vacuuming image(s)')
imgadm.manage_images()
result['changed'] = imgadm.changed
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
google/learned_optimization | learned_optimization/research/hysteresis/data_in_state_tasks.py | 1 | 4624 | # coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tasks with deterministic data that is managed throught the state variable."""
# pylint: disable=invalid-name
import functools
from typing import Any, Mapping, Tuple
import gin
import haiku as hk
import jax
import jax.numpy as jnp
from learned_optimization import profile
from learned_optimization.tasks import base
from learned_optimization.tasks.datasets import base as datasets_base
import numpy as onp
import tensorflow_datasets as tfds
Params = Any
ModelState = Any
PRNGKey = jnp.ndarray
class _MLPImageTask(base.Task):
"""MLP based image task."""
def __init__(self,
datasetname,
hidden_sizes,
act_fn=jax.nn.relu,
dropout_rate=0.0):
super().__init__()
self.num_classes = 10
sizes = list(hidden_sizes) + [self.num_classes]
self.datasetname = datasetname
self.batch_size = 128
def _forward(inp):
inp = jnp.reshape(inp, [inp.shape[0], -1])
return hk.nets.MLP(
sizes, activation=act_fn)(
inp, dropout_rate=dropout_rate, rng=hk.next_rng_key())
self._mod = hk.transform(_forward)
def init_with_state(self, key: PRNGKey):
data = batch_from_idx(self.datasetname, (8, 8), "train", self.batch_size, 0)
key1, key2 = jax.random.split(key)
# Choose a random starting batch index by sampling a large random integer
start_batch = jax.random.randint(key1, [], 0, int(1e6))
return self._mod.init(key2, data["image"]), start_batch
def loss_with_state(self, params: Any, state: Any, key: jnp.ndarray,
data: Any):
data_idx = state
data = batch_from_idx(self.datasetname, (8, 8), "train", self.batch_size,
data_idx)
logits = self._mod.apply(params, key, data["image"])
labels = jax.nn.one_hot(data["label"], self.num_classes)
vec_loss = base.softmax_cross_entropy(logits=logits, labels=labels)
return jnp.mean(vec_loss), data_idx + 1
def loss_with_state_and_aux(self, params, state, key, data):
l, s = self.loss_with_state(params, state, key, data)
return l, s, {}
def normalizer(self, loss):
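# Note (added): log(num_classes) is the cross-entropy of a uniform
# prediction, so clipping at 1.5x that value (and mapping NaN/inf to it)
# keeps diverged runs from producing an unbounded training signal.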
maxval = 1.5 * onp.log(self.num_classes)
loss = jnp.clip(loss, 0, maxval)
return jnp.nan_to_num(loss, nan=maxval, posinf=maxval, neginf=maxval)
@functools.lru_cache(None)
def all_data(datasetname, split, image_size, seed=0):
cfg = {
"image_size": image_size,
"stack_channels": 1,
"aug_flip_left_right": False,
"aug_flip_up_down": False,
"normalize_mean": None,
"normalize_std": None,
"convert_to_black_and_white": True,
}
with profile.Profile(f"tfds.load({datasetname})"):
dataset = datasets_base._cached_tfds_load( # pylint:disable=protected-access
datasetname,
split=split,
batch_size=-1)
data = tfds.as_numpy(datasets_base._image_map_fn(cfg, dataset)) # pylint:disable=protected-access
idx = onp.arange(data["image"].shape[0])
onp.random.RandomState(seed).shuffle(idx)
return jax.tree_util.tree_map(lambda x: jnp.asarray(x[idx]), data)
def batch_from_idx(datasetname, image_size, split, batch_size, idx, seed=0):
"""Deterministically get a batch of data with an offset of `idx`."""
with jax.ensure_compile_time_eval():
data = all_data(datasetname, split, image_size=image_size, seed=seed)
batches = data["image"].shape[0] // batch_size
idx = idx % batches
b = {}
b["image"] = jax.lax.dynamic_slice(data["image"], [idx * batch_size, 0, 0, 0],
[batch_size, 8, 8, 1])
b["label"] = jax.lax.dynamic_slice(data["label"], [idx * batch_size],
[batch_size])
return b
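# Usage sketch (added; hypothetical call, shapes follow the 8x8
# black-and-white config used above):
# b = batch_from_idx("cifar10", (8, 8), "train", batch_size=128, idx=3)
# b["image"].shape == (128, 8, 8, 1) and b["label"].shape == (128,)
# The same idx always yields the same batch, which is what lets the task
# carry its data cursor in `state`.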
@gin.configurable
def DataInState_ImageMLP_Cifar10BW8_Relu32():
"""A 1 hidden layer, 32 unit MLP for 8x8 black and white cifar10."""
return _MLPImageTask("cifar10", [32])
@gin.configurable
def DataInState_ImageMLP_FashionMnist8_Relu32():
"""A 1 hidden layer, 32 hidden unit MLP designed for 8x8 fashion mnist."""
return _MLPImageTask("fashion_mnist", [32])
| apache-2.0 |
justincassidy/scikit-learn | examples/linear_model/plot_ols_3d.py | 347 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
LUTAN/tensorflow | tensorflow/contrib/learn/python/learn/datasets/mnist.py | 1 | 9594 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for downloading and reading MNIST data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import numpy
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _read32(bytestream):
dt = numpy.dtype(numpy.uint32).newbyteorder('>')
return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
def extract_images(f):
"""Extract the images into a 4D uint8 numpy array [index, y, x, depth].
Args:
f: A file object that can be passed into a gzip reader.
Returns:
data: A 4D uint8 numpy array [index, y, x, depth].
Raises:
ValueError: If the bytestream does not start with 2051.
"""
print('Extracting', f.name)
with gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError('Invalid magic number %d in MNIST image file: %s' %
(magic, f.name))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8)
data = data.reshape(num_images, rows, cols, 1)
return data
def dense_to_one_hot(labels_dense, num_classes):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = numpy.arange(num_labels) * num_classes
labels_one_hot = numpy.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
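# Worked example (added, illustrative):
#   dense_to_one_hot(numpy.array([0, 2]), num_classes=3)
#   -> [[1., 0., 0.],
#       [0., 0., 1.]]
# The flat-index trick writes a 1 at position row * num_classes + label.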
def extract_labels(f, one_hot=False, num_classes=10):
"""Extract the labels into a 1D uint8 numpy array [index].
Args:
f: A file object that can be passed into a gzip reader.
one_hot: Does one hot encoding for the result.
num_classes: Number of classes for the one hot encoding.
Returns:
labels: a 1D uint8 numpy array.
Raises:
ValueError: If the bytestream doesn't start with 2049.
"""
print('Extracting', f.name)
with gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
if magic != 2049:
raise ValueError('Invalid magic number %d in MNIST label file: %s' %
(magic, f.name))
num_items = _read32(bytestream)
buf = bytestream.read(num_items)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
if one_hot:
return dense_to_one_hot(labels, num_classes)
return labels
class DataSet(object):
def __init__(self,
images,
labels,
fake_data=False,
one_hot=False,
dtype=dtypes.float32,
reshape=True,
seed=None):
"""Construct a DataSet.
one_hot arg is used only if fake_data is true. `dtype` can be either
`uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
`[0, 1]`. Seed arg provides for convenient deterministic testing.
"""
seed1, seed2 = random_seed.get_seed(seed)
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seed1 if seed is None else seed2)
dtype = dtypes.as_dtype(dtype).base_dtype
if dtype not in (dtypes.uint8, dtypes.float32):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
dtype)
if fake_data:
self._num_examples = 10000
self.one_hot = one_hot
else:
assert images.shape[0] == labels.shape[0], (
'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
self._num_examples = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
images = images.reshape(images.shape[0],
images.shape[1] * images.shape[2])
if dtype == dtypes.float32:
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(numpy.float32)
images = numpy.multiply(images, 1.0 / 255.0)
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size, fake_data=False, shuffle=True):
"""Return the next `batch_size` examples from this data set."""
if fake_data:
fake_image = [1] * 784
if self.one_hot:
fake_label = [1] + [0] * 9
else:
fake_label = 0
return [fake_image for _ in xrange(batch_size)], [
fake_label for _ in xrange(batch_size)
]
start = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
perm0 = numpy.arange(self._num_examples)
numpy.random.shuffle(perm0)
self._images = self.images[perm0]
self._labels = self.labels[perm0]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
rest_num_examples = self._num_examples - start
images_rest_part = self._images[start:self._num_examples]
labels_rest_part = self._labels[start:self._num_examples]
# Shuffle the data
if shuffle:
perm = numpy.arange(self._num_examples)
numpy.random.shuffle(perm)
self._images = self.images[perm]
self._labels = self.labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size - rest_num_examples
end = self._index_in_epoch
images_new_part = self._images[start:end]
labels_new_part = self._labels[start:end]
return numpy.concatenate((images_rest_part, images_new_part), axis=0), numpy.concatenate((labels_rest_part, labels_new_part), axis=0)
else:
self._index_in_epoch += batch_size
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
def read_data_sets(train_dir,
fake_data=False,
one_hot=False,
dtype=dtypes.float32,
reshape=True,
validation_size=5000,
seed=None):
if fake_data:
def fake():
return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)
train = fake()
validation = fake()
test = fake()
return base.Datasets(train=train, validation=validation, test=test)
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
local_file = base.maybe_download(TRAIN_IMAGES, train_dir,
SOURCE_URL + TRAIN_IMAGES)
with open(local_file, 'rb') as f:
train_images = extract_images(f)
local_file = base.maybe_download(TRAIN_LABELS, train_dir,
SOURCE_URL + TRAIN_LABELS)
with open(local_file, 'rb') as f:
train_labels = extract_labels(f, one_hot=one_hot)
local_file = base.maybe_download(TEST_IMAGES, train_dir,
SOURCE_URL + TEST_IMAGES)
with open(local_file, 'rb') as f:
test_images = extract_images(f)
local_file = base.maybe_download(TEST_LABELS, train_dir,
SOURCE_URL + TEST_LABELS)
with open(local_file, 'rb') as f:
test_labels = extract_labels(f, one_hot=one_hot)
if not 0 <= validation_size <= len(train_images):
raise ValueError(
'Validation size should be between 0 and {}. Received: {}.'
.format(len(train_images), validation_size))
validation_images = train_images[:validation_size]
validation_labels = train_labels[:validation_size]
train_images = train_images[validation_size:]
train_labels = train_labels[validation_size:]
train = DataSet(train_images, train_labels, dtype=dtype, reshape=reshape, seed=seed)
validation = DataSet(validation_images,
validation_labels,
dtype=dtype,
reshape=reshape,
seed=seed)
test = DataSet(test_images, test_labels, dtype=dtype, reshape=reshape, seed=seed)
return base.Datasets(train=train, validation=validation, test=test)
def load_mnist(train_dir='MNIST-data'):
return read_data_sets(train_dir)
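# Minimal usage sketch (added; 'MNIST-data' is just the default directory):
# datasets = read_data_sets('MNIST-data', one_hot=True)
# images, labels = datasets.train.next_batch(100)
# images.shape == (100, 784) (float32 in [0, 1]); labels.shape == (100, 10)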
| apache-2.0 |
micadeyeye/Blongo | django/contrib/gis/tests/test_geoip.py | 290 | 4204 | import os, unittest
from django.db import settings
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.gis.utils import GeoIP, GeoIPException
# Note: Requires use of both the GeoIP country and city datasets.
# The GEOIP_PATH setting should be the only setting set (the directory
# should contain links or the actual database files 'GeoIP.dat' and
# 'GeoLiteCity.dat').
class GeoIPTest(unittest.TestCase):
def test01_init(self):
"Testing GeoIP initialization."
g1 = GeoIP() # Everything inferred from GeoIP path
path = settings.GEOIP_PATH
g2 = GeoIP(path, 0) # Passing in data path explicitly.
g3 = GeoIP.open(path, 0) # MaxMind Python API syntax.
for g in (g1, g2, g3):
self.assertEqual(True, bool(g._country))
self.assertEqual(True, bool(g._city))
# Only passing in the location of one database.
city = os.path.join(path, 'GeoLiteCity.dat')
cntry = os.path.join(path, 'GeoIP.dat')
g4 = GeoIP(city, country='')
self.assertEqual(None, g4._country)
g5 = GeoIP(cntry, city='')
self.assertEqual(None, g5._city)
# Improper parameters.
bad_params = (23, 'foo', 15.23)
for bad in bad_params:
self.assertRaises(GeoIPException, GeoIP, cache=bad)
if isinstance(bad, basestring):
e = GeoIPException
else:
e = TypeError
self.assertRaises(e, GeoIP, bad, 0)
def test02_bad_query(self):
"Testing GeoIP query parameter checking."
cntry_g = GeoIP(city='<foo>')
# No city database available, these calls should fail.
self.assertRaises(GeoIPException, cntry_g.city, 'google.com')
self.assertRaises(GeoIPException, cntry_g.coords, 'yahoo.com')
# Non-string query should raise TypeError
self.assertRaises(TypeError, cntry_g.country_code, 17)
self.assertRaises(TypeError, cntry_g.country_name, GeoIP)
def test03_country(self):
"Testing GeoIP country querying methods."
g = GeoIP(city='<foo>')
fqdn = 'www.google.com'
addr = '12.215.42.19'
for query in (fqdn, addr):
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query))
self.assertEqual({'country_code' : 'US', 'country_name' : 'United States'},
g.country(query))
def test04_city(self):
"Testing GeoIP city querying methods."
g = GeoIP(country='<foo>')
addr = '130.80.29.3'
fqdn = 'chron.com'
for query in (fqdn, addr):
# Country queries should still work.
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query))
self.assertEqual({'country_code' : 'US', 'country_name' : 'United States'},
g.country(query))
# City information dictionary.
d = g.city(query)
self.assertEqual('USA', d['country_code3'])
self.assertEqual('Houston', d['city'])
self.assertEqual('TX', d['region'])
self.assertEqual(713, d['area_code'])
geom = g.geos(query)
self.failIf(not isinstance(geom, GEOSGeometry))
lon, lat = (-95.3670, 29.7523)
lat_lon = g.lat_lon(query)
lat_lon = (lat_lon[1], lat_lon[0])
for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
self.assertAlmostEqual(lon, tup[0], 4)
self.assertAlmostEqual(lat, tup[1], 4)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(GeoIPTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
| bsd-3-clause |
hankcs/HanLP | hanlp/layers/dropout.py | 1 | 5436 | # -*- coding:utf-8 -*-
# Date: 2020-06-05 17:47
from typing import List
import torch
import torch.nn as nn
class WordDropout(nn.Module):
def __init__(self, p: float, oov_token: int, exclude_tokens: List[int] = None) -> None:
super().__init__()
self.oov_token = oov_token
self.p = p
if not exclude_tokens:
exclude_tokens = [0]
self.exclude = exclude_tokens
@staticmethod
def token_dropout(tokens: torch.LongTensor,
oov_token: int,
exclude_tokens: List[int],
p: float = 0.2,
training: bool = True) -> torch.LongTensor:
"""During training, randomly replaces some of the non-padding tokens to a mask token with probability ``p``
Adopted from https://github.com/Hyperparticle/udify
Args:
tokens: The current batch of padded sentences with word ids
oov_token: The mask token
exclude_tokens: The tokens for padding the input batch
p: The probability a word gets mapped to the unknown token
training: Applies the dropout if set to ``True``
Returns:
A copy of the input batch with token dropout applied
"""
if training and p > 0:
# This creates a mask that only considers unpadded tokens for mapping to oov
padding_mask = tokens.new_ones(tokens.size(), dtype=torch.bool)
for pad in exclude_tokens:
padding_mask &= (tokens != pad)
# Create a uniformly random mask selecting either the original words or OOV tokens
dropout_mask = (tokens.new_empty(tokens.size(), dtype=torch.float).uniform_() < p)
oov_mask = dropout_mask & padding_mask
oov_fill = tokens.new_empty(tokens.size(), dtype=torch.long).fill_(oov_token)
result = torch.where(oov_mask, oov_fill, tokens)
return result
else:
return tokens
def forward(self, tokens: torch.LongTensor) -> torch.LongTensor:
return self.token_dropout(tokens, self.oov_token, self.exclude, self.p, self.training)
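# Usage sketch (added; hypothetical vocabulary ids: 0 = padding, 1 = UNK):
# drop = WordDropout(p=0.2, oov_token=1, exclude_tokens=[0])
# drop.train()
# noisy = drop(token_ids)  # roughly 20% of non-padding tokens become UNK
# In eval mode (drop.eval()) the input passes through unchanged.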
class SharedDropout(nn.Module):
def __init__(self, p=0.5, batch_first=True):
super(SharedDropout, self).__init__()
self.p = p
self.batch_first = batch_first
def extra_repr(self):
s = f"p={self.p}"
if self.batch_first:
s += f", batch_first={self.batch_first}"
return s
def forward(self, x):
if self.training:
if self.batch_first:
mask = self.get_mask(x[:, 0], self.p)
else:
mask = self.get_mask(x[0], self.p)
x *= mask.unsqueeze(1) if self.batch_first else mask
return x
@staticmethod
def get_mask(x, p):
mask = x.new_empty(x.shape).bernoulli_(1 - p)
mask = mask / (1 - p)
return mask
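# Usage note (added): with batch_first=True the mask is computed from the
# first timestep and broadcast over the sequence, so every timestep of a
# sequence drops the same hidden units:
# x = torch.ones(2, 5, 8)        # (batch, seq_len, hidden)
# y = SharedDropout(p=0.5)(x)    # dropped units are identical at every timestep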
class IndependentDropout(nn.Module):
def __init__(self, p=0.5):
r"""
For :math:`N` tensors, they use different dropout masks respectively.
When :math:`N-M` of them are dropped, the remaining :math:`M` ones are scaled by a factor of :math:`N/M` to compensate,
and when all of them are dropped together, zeros are returned.
Copied from https://github.com/yzhangcs/parser/master/supar/modules/dropout.py.
Args:
p (float):
The probability of an element to be zeroed. Default: 0.5.
Examples:
>>> x, y = torch.ones(1, 3, 5), torch.ones(1, 3, 5)
>>> x, y = IndependentDropout()(x, y)
>>> x
tensor([[[1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0.],
[2., 2., 2., 2., 2.]]])
>>> y
tensor([[[1., 1., 1., 1., 1.],
[2., 2., 2., 2., 2.],
[0., 0., 0., 0., 0.]]])
"""
super(IndependentDropout, self).__init__()
self.p = p
def extra_repr(self):
return f"p={self.p}"
def forward(self, *items):
if self.training:
masks = [x.new_empty(x.shape[:2]).bernoulli_(1 - self.p)
for x in items]
total = sum(masks)
scale = len(items) / total.max(torch.ones_like(total))
masks = [mask * scale for mask in masks]
items = [item * mask.unsqueeze(dim=-1)
for item, mask in zip(items, masks)]
return items
class LockedDropout(nn.Module):
def __init__(self, dropout_rate=0.5):
super(LockedDropout, self).__init__()
self.dropout_rate = dropout_rate
def forward(self, x):
if not self.training or not self.dropout_rate:
return x
if x.dim() == 3:
mask = x.new(x.size(0), 1, x.size(2)).bernoulli_(1 - self.dropout_rate) / (1 - self.dropout_rate)
mask = mask.expand_as(x)
elif x.dim() == 2:
mask = torch.empty_like(x).bernoulli_(1 - self.dropout_rate) / (1 - self.dropout_rate)
else:
raise ValueError(f'Unsupported dim: {x.dim()}. Only 2d (T,C) or 3d (B,T,C) is supported')
return mask * x
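# Usage sketch (added): LockedDropout applies the same idea to (B, T, C)
# inputs -- one Bernoulli mask per (batch, channel) pair, reused across all
# timesteps:
# out = LockedDropout(dropout_rate=0.3)(torch.randn(4, 10, 16))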
| apache-2.0 |
mohamedkeid/Image-Captioning | eval.py | 1 | 2555 | import argparse
import etl
import helpers
import torch
import torchvision.models as models
from decoder import DecoderRNN
from language import Language
from torch.autograd import Variable
# Parse argument for the input image path
parser = argparse.ArgumentParser()
parser.add_argument('path')
args = parser.parse_args()
helpers.validate_path(args.path)
n_layers = 2
# Initialize models
lang = helpers.load_object('language')
encoder = models.vgg16(pretrained=True)
decoder = DecoderRNN('general', 512, lang.n_words, n_layers, dropout_p=0.)
# Load model parameters
decoder.load_state_dict(torch.load('data/decoder_params'))
decoder.attention.load_state_dict(torch.load('data/attention_params'))
# Move models to GPU
encoder.cuda()
decoder.cuda()
def evaluate(path, encoder, max_length=256):
input_variable = etl.get_image_from_path(path, encoder)
# Create starting vectors for decoder
decoder_input = Variable(torch.LongTensor([[0]]))
decoder_input = decoder_input.cuda()
decoder_context = Variable(torch.zeros(1, decoder.hidden_size))
decoder_context = decoder_context.cuda()
decoder_hidden = decoder.init_hidden()
decoded_words = []
decoder_attentions = torch.zeros(max_length, max_length)
# Run through decoder
for di in range(max_length):
decoder_output, decoder_context, decoder_hidden, decoder_attention = decoder(decoder_input,
decoder_context,
decoder_hidden,
input_variable)
decoder_attentions[di, :decoder_attention.size(2)] += decoder_attention.squeeze(0).squeeze(0).cpu().data
# Choose top word from output
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
if ni == Language.eos_token:
decoded_words.append('<EOS>')
break
else:
decoded_words.append(lang.index2word[ni])
# Next input is chosen word
decoder_input = Variable(torch.LongTensor([[ni]]))
decoder_input = decoder_input.cuda()
return decoded_words, decoder_attentions[:di + 1, :len(input_variable)]
output_words, decoder_attn = evaluate(args.path, encoder)
output_sentence = ' '.join(output_words)
print(output_sentence)
| mit |
justincassidy/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 412 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
| bsd-3-clause |
Lawrence-Liu/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 252 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two South American
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
 also known as the Forest Small Rice Rat, a rodent that lives in
 Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
justincassidy/scikit-learn | sklearn/metrics/pairwise.py | 103 | 42995 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
    paired_distances : distances between pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
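# Illustrative sketch (added for exposition; not part of the original module):
# a numerical check of the expansion used above,
# ||x - y||^2 = dot(x, x) - 2 * dot(x, y) + dot(y, y), on small random vectors.
def _euclidean_expansion_demo():
    rng = np.random.RandomState(0)
    x, y = rng.rand(5), rng.rand(5)
    direct = np.dot(x - y, x - y)
    expanded = np.dot(x, x) - 2 * np.dot(x, y) + np.dot(y, y)
    assert np.allclose(direct, expanded)
    return direct, expanded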
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
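# Illustrative sketch (added for exposition; not part of the original module):
# the chunked computation above matches argmin/min over the full distance
# matrix while only holding batch_size x batch_size blocks in memory. The toy
# arrays are assumptions chosen only for the demo.
def _argmin_min_demo():
    X = np.array([[0., 0.], [10., 10.]])
    Y = np.array([[1., 0.], [9., 9.], [100., 100.]])
    idx, dist = pairwise_distances_argmin_min(X, Y, batch_size=1)
    full = euclidean_distances(X, Y)
    assert (idx == full.argmin(axis=1)).all()
    assert np.allclose(dist, full.min(axis=1))
    return idx, dist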
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
    -----
    The cosine distance is equivalent to half the squared
    euclidean distance if each sample is normalized to unit norm.
    """
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
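# Illustrative check (added for exposition; not part of the original module) of
# the note above: on rows normalized to unit norm, the cosine distance equals
# half the squared euclidean distance.
def _paired_cosine_identity_demo():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(4, 3), rng.rand(4, 3)
    Xn, Yn = normalize(X), normalize(Y)
    lhs = paired_cosine_distances(Xn, Yn)
    rhs = .5 * paired_euclidean_distances(Xn, Yn) ** 2
    assert np.allclose(lhs, rhs)
    return lhs, rhs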
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise distance metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
    filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
| bsd-3-clause |
edhuckle/statsmodels | examples/python/regression_diagnostics.py | 28 | 2876 |
## Regression diagnostics
# This example file shows how to use a few of the ``statsmodels`` regression diagnostic tests in a real-life context. You can learn about more tests and find more information about them on the [Regression Diagnostics page.](http://statsmodels.sourceforge.net/stable/diagnostic.html)
#
# Note that most of the tests described here only return a tuple of numbers, without any annotation. A full description of outputs is always included in the docstring and in the online ``statsmodels`` documentation. For presentation purposes, we use the ``lzip(name, test)`` construct to pretty-print short descriptions in the examples below.
# ## Estimate a regression model
from __future__ import print_function
from statsmodels.compat import lzip
import statsmodels
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
import statsmodels.stats.api as sms
# Load data
url = 'http://vincentarelbundock.github.io/Rdatasets/csv/HistData/Guerry.csv'
dat = pd.read_csv(url)
# Fit regression model (using the natural log of one of the regressors)
results = smf.ols('Lottery ~ Literacy + np.log(Pop1831)', data=dat).fit()
# Inspect the results
print(results.summary())
# ## Normality of the residuals
# Jarque-Bera test:
name = ['Jarque-Bera', 'Chi^2 two-tail prob.', 'Skew', 'Kurtosis']
test = sms.jarque_bera(results.resid)
lzip(name, test)
# Omnibus normality test:
name = ['Chi^2', 'Two-tail probability']
test = sms.omni_normtest(results.resid)
lzip(name, test)
# ## Influence tests
#
# Once created, an object of class ``OLSInfluence`` holds attributes and methods that allow users to assess the influence of each observation. For example, we can compute and extract the first few rows of DFbetas by:
from statsmodels.stats.outliers_influence import OLSInfluence
test_class = OLSInfluence(results)
test_class.dfbetas[:5,:]
# Explore other options by typing ``dir(influence_test)``
#
# Useful information on leverage can also be plotted:
from statsmodels.graphics.regressionplots import plot_leverage_resid2
print(plot_leverage_resid2(results))
# Other plotting options can be found on the [Graphics page.](http://statsmodels.sourceforge.net/stable/graphics.html)
# ## Multicollinearity
#
# Condition number:
np.linalg.cond(results.model.exog)
# ## Heteroskedasticity tests
#
# Breusch-Pagan test:
name = ['Lagrange multiplier statistic', 'p-value',
'f-value', 'f p-value']
test = sms.het_breushpagan(results.resid, results.model.exog)
lzip(name, test)
# Goldfeld-Quandt test:
name = ['F statistic', 'p-value']
test = sms.het_goldfeldquandt(results.resid, results.model.exog)
lzip(name, test)
# ## Linearity
#
# Harvey-Collier multiplier test for the null hypothesis that the linear specification is correct:
name = ['t value', 'p value']
test = sms.linear_harvey_collier(results)
lzip(name, test)
| bsd-3-clause |
adammenges/statsmodels | examples/python/regression_diagnostics.py | 28 | 2876 |
## Regression diagnostics
# This example file shows how to use a few of the ``statsmodels`` regression diagnostic tests in a real-life context. You can learn about more tests and find out more information abou the tests here on the [Regression Diagnostics page.](http://statsmodels.sourceforge.net/stable/diagnostic.html)
#
# Note that most of the tests described here only return a tuple of numbers, without any annotation. A full description of outputs is always included in the docstring and in the online ``statsmodels`` documentation. For presentation purposes, we use the ``zip(name,test)`` construct to pretty-print(short descriptions in the examples below.
# ## Estimate a regression model
from __future__ import print_function
from statsmodels.compat import lzip
import statsmodels
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
import statsmodels.stats.api as sms
# Load data
url = 'http://vincentarelbundock.github.io/Rdatasets/csv/HistData/Guerry.csv'
dat = pd.read_csv(url)
# Fit regression model (using the natural log of one of the regressaors)
results = smf.ols('Lottery ~ Literacy + np.log(Pop1831)', data=dat).fit()
# Inspect the results
print(results.summary())
# ## Normality of the residuals
# Jarque-Bera test:
name = ['Jarque-Bera', 'Chi^2 two-tail prob.', 'Skew', 'Kurtosis']
test = sms.jarque_bera(results.resid)
lzip(name, test)
# Omni test:
name = ['Chi^2', 'Two-tail probability']
test = sms.omni_normtest(results.resid)
lzip(name, test)
# ## Influence tests
#
# Once created, an object of class ``OLSInfluence`` holds attributes and methods that allow users to assess the influence of each observation. For example, we can compute and extract the first few rows of DFbetas by:
from statsmodels.stats.outliers_influence import OLSInfluence
test_class = OLSInfluence(results)
test_class.dfbetas[:5,:]
# Explore other options by typing ``dir(influence_test)``
#
# Useful information on leverage can also be plotted:
from statsmodels.graphics.regressionplots import plot_leverage_resid2
print(plot_leverage_resid2(results))
# Other plotting options can be found on the [Graphics page.](http://statsmodels.sourceforge.net/stable/graphics.html)
# ## Multicollinearity
#
# Condition number:
np.linalg.cond(results.model.exog)
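# As a complementary, hedged sketch (an addition, not part of the original
# example), per-regressor variance inflation factors can be computed with
# ``variance_inflation_factor`` from ``statsmodels.stats.outliers_influence``:
from statsmodels.stats.outliers_influence import variance_inflation_factor
exog = results.model.exog
lzip(results.model.exog_names,
     [variance_inflation_factor(exog, i) for i in range(exog.shape[1])])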
# ## Heteroskedasticity tests
#
# Breusch-Pagan test:
name = ['Lagrange multiplier statistic', 'p-value',
'f-value', 'f p-value']
test = sms.het_breushpagan(results.resid, results.model.exog)
lzip(name, test)
# Goldfeld-Quandt test:
name = ['F statistic', 'p-value']
test = sms.het_goldfeldquandt(results.resid, results.model.exog)
lzip(name, test)
# ## Linearity
#
# Harvey-Collier multiplier test for the null hypothesis that the linear specification is correct:
name = ['t value', 'p value']
test = sms.linear_harvey_collier(results)
lzip(name, test)
| bsd-3-clause |
hankcs/HanLP | hanlp/datasets/qa/hotpotqa.py | 1 | 6170 | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-03-20 19:46
from enum import Enum, auto
import torch
import ujson
from torch.nn.utils.rnn import pad_sequence
from hanlp.common.dataset import TransformableDataset
from hanlp_common.util import merge_list_of_dict
HOTPOT_QA_TRAIN = 'http://curtis.ml.cmu.edu/datasets/hotpot/hotpot_train_v1.1.json'
HOTPOT_QA_DISTRACTOR_DEV = 'http://curtis.ml.cmu.edu/datasets/hotpot/hotpot_dev_distractor_v1.json'
HOTPOT_QA_FULLWIKI_DEV = 'http://curtis.ml.cmu.edu/datasets/hotpot/hotpot_dev_fullwiki_v1.json'
class HotpotQADataset(TransformableDataset):
def load_file(self, filepath):
with open(filepath) as fd:
return ujson.load(fd)
class BuildGraph(object):
def __init__(self, dst='graph') -> None:
super().__init__()
self.dst = dst
def __call__(self, sample: dict):
sample[self.dst] = build_graph(sample)
return sample
def hotpotqa_collate_fn(samples):
batch = merge_list_of_dict(samples)
max_seq_len = len(max([x['graph'] for x in samples], key=len))
arc = torch.zeros([len(samples), max_seq_len, max_seq_len])
token_offset = torch.zeros([len(samples), max_seq_len], dtype=torch.long)
src_mask = torch.zeros([len(samples), max_seq_len], dtype=torch.bool)
sp_candidate_mask = torch.zeros([len(samples), max_seq_len], dtype=torch.bool)
sp_label = torch.zeros([len(samples), max_seq_len], dtype=torch.float)
# sp = torch.zeros([len(samples), max_seq_len], dtype=torch.bool)
tokens = []
offset = 0
for i, sample in enumerate(samples):
graph = sample['graph']
for j, u in enumerate(graph):
u: Vertex = u
for v in u.to:
v: Vertex = v
arc[i, v.id, u.id] = 1
arc[i, u.id, v.id] = 1
# record each vertex's token offset
token_offset[i, u.id] = offset
src_mask[i, u.id] = True
sp_candidate_mask[i, u.id] = u.is_sp_root_candidate()
sp_label[i, u.id] = u.is_sp_root()
offset += 1
tokens.extend(sample['token_id'])
seq_lengths = torch.LongTensor(list(map(len, tokens)))
tokens = [torch.LongTensor(x) for x in tokens]
tokens = pad_sequence(tokens, batch_first=True)
batch['adj'] = arc
batch['tokens'] = tokens
batch['src_mask'] = src_mask
batch['seq_lengths'] = seq_lengths
batch['token_offset'] = token_offset
batch['sp_candidate_mask'] = sp_candidate_mask
batch['sp_label'] = sp_label
return batch
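# Usage sketch (an addition for illustration, not in the original file): any
# iterable of sample dicts carrying a 'graph' (a list of Vertex) and a
# 'token_id' key can be batched with the function above, e.g.
#   torch.utils.data.DataLoader(samples, batch_size=2,
#                               collate_fn=hotpotqa_collate_fn)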
def flat_sentence(sample: dict) -> dict:
sample['token'] = token = []
for sent in sample['parsed_sentences']:
token.append(['bos'] + [x.lower() for x in sent[0]])
return sample
def create_sp_label(sample: dict) -> dict:
sample['sp_label'] = sp_label = []
def label(title_, index_):
for t, i in sample['supporting_facts']:
if t == title_ and i == index_:
return 1
return 0
for context in sample['context']:
title, sents = context
for idx, sent in enumerate(sents):
sp_label.append(label(title, idx))
assert len(sample['supporting_facts']) == sum(sp_label)
return sample
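# Hedged illustration (an addition, not in the original file): for a toy
# sample with a single supporting fact, ``create_sp_label`` emits one 0/1
# flag per context sentence, in flattened sentence order:
#   _toy = {'supporting_facts': [['A', 0]],
#           'context': [['A', ['s0', 's1']], ['B', ['s0']]]}
#   create_sp_label(_toy)['sp_label']  # -> [1, 0, 0]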
class Type(Enum):
Q_ROOT = auto()
Q_WORD = auto()
SP_ROOT = auto()
SP_WORD = auto()
NON_SP_ROOT = auto()
NON_SP_WORD = auto()
DOCUMENT_TITLE = auto()
class Vertex(object):
def __init__(self, id, type: Type, text=None) -> None:
super().__init__()
self.id = id
self.type = type
if not text:
text = str(type).split('.')[-1]
self.text = text
self.to = []
self.rel = []
def connect(self, to, rel):
self.to.append(to)
self.rel.append(rel)
def __str__(self) -> str:
return f'{self.text} {self.id}'
def __hash__(self) -> int:
return self.id
def is_word(self):
return self.type in {Type.SP_WORD, Type.Q_WORD, Type.NON_SP_WORD}
def is_question(self):
return self.type in {Type.Q_ROOT, Type.Q_WORD}
def is_sp(self):
return self.type in {Type.SP_ROOT, Type.SP_WORD}
def is_sp_root(self):
return self.type in {Type.SP_ROOT}
def is_sp_root_candidate(self):
return self.type in {Type.SP_ROOT, Type.NON_SP_ROOT}
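def _demo_vertex_wiring():
    # Minimal self-contained sketch (an addition, not in the original file)
    # of how vertices are wired: ``connect`` records directed arcs with
    # relation labels, which ``hotpotqa_collate_fn`` later symmetrizes.
    root = Vertex(0, Type.Q_ROOT)
    word = Vertex(1, Type.Q_WORD, 'which')
    root.connect(word, 'dep')
    return [(u.id, v.id, r) for u in (root, word) for v, r in zip(u.to, u.rel)]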
def build_graph(each: dict, debug=False):
raw_sents = []
raw_sents.append(each['question'])
sp_idx = set()
sp_sents = {}
for sp in each['supporting_facts']:
title, offset = sp
ids = sp_sents.get(title, None)
if ids is None:
sp_sents[title] = ids = set()
ids.add(offset)
idx = 1
for document in each['context']:
title, sents = document
raw_sents += sents
for i, s in enumerate(sents):
if title in sp_sents and i in sp_sents[title]:
sp_idx.add(idx)
idx += 1
assert idx == len(raw_sents)
parsed_sents = each['parsed_sentences']
assert len(raw_sents) == len(parsed_sents)
graph = []
for idx, (raw, sent) in enumerate(zip(raw_sents, parsed_sents)):
if debug:
if idx > 1 and idx not in sp_idx:
continue
offset = len(graph)
if idx == 0:
if debug:
print(f'Question: {raw}')
graph.append(Vertex(len(graph), Type.Q_ROOT))
else:
if debug:
if idx in sp_idx:
print(f'Supporting Fact: {raw}')
graph.append(Vertex(len(graph), Type.SP_ROOT if idx in sp_idx else Type.NON_SP_ROOT))
tokens, heads, deprels = sent
for t, h, d in zip(tokens, heads, deprels):
graph.append(
Vertex(len(graph), (Type.SP_WORD if idx in sp_idx else Type.NON_SP_WORD) if idx else Type.Q_WORD, t))
for i, (h, d) in enumerate(zip(heads, deprels)):
graph[offset + h].connect(graph[offset + i + 1], d)
q_root = graph[0]
for u in graph:
if u.type == Type.SP_ROOT or u.type == Type.NON_SP_ROOT:
q_root.connect(u, 'supporting fact?')
return graph
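# A clarifying note (an addition, not in the original file) on the contract of
# ``build_graph``: graph[0] is always the question root, each sentence
# contributes one root vertex followed by one vertex per token, and every
# sentence root is attached to the question root via the 'supporting fact?'
# relation.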
| apache-2.0 |
augustoppimenta/crab | scikits/crab/datasets/book_crossing.py | 10 | 5317 | """Caching loader for the Book-Crossing Dataset
The description of the dataset is available on the official website at:
http://www.informatik.uni-freiburg.de/~cziegler/BX/
Quoting the introduction:
Collected by Cai-Nicolas Ziegler in a 4-week crawl
(August / September 2004) from the Book-Crossing community
with kind permission from Ron Hornbaker, CTO of Humankind
Systems. Contains 278,858 users (anonymized but with
demographic information) providing 1,149,780 ratings
(explicit / implicit) about 271,379 books.
This dataset loader will download the dataset,
whose size is around 22 MB compressed. Once
uncompressed, the train set is around 130 MB.
The data is downloaded, extracted and cached in the '~/scikit_crab_data'
folder.
References
----------
Improving Recommendation Lists Through Topic Diversification,
Cai-Nicolas Ziegler, Sean M. McNee, Joseph A. Konstan, Georg Lausen;
Proceedings of the 14th International World Wide Web Conference (WWW '05),
May 10-14, 2005, Chiba, Japan.
"""
# Copyright (c) 2011 Marcel Caraciolo <marcel@muricoca.com>
# License: Simplified BSD
import os
import urllib
import logging
import zipfile
from os.path import dirname
from os.path import join
import numpy as np
from base import Bunch
import csv
logger = logging.getLogger(__name__)
URL = "http://www.informatik.uni-freiburg.de/~cziegler/BX/BX-CSV-Dump.zip"
ARCHIVE_NAME = "BX-CSV-Dump.zip"
def download_book_crossings(target_dir):
""" Download the book-crossing data and unzip it """
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if not os.path.exists(archive_path):
logger.warn("Downloading dataset from %s (77 MB)", URL)
opener = urllib.urlopen(URL)
open(archive_path, 'wb').write(opener.read())
logger.info("Decompressing %s", archive_path)
source_zip = zipfile.ZipFile(archive_path, 'r')
archives = []
for name in source_zip.namelist():
if name.find('.csv') != -1:
source_zip.extract(name, target_dir)
archives.append(name)
source_zip.close()
os.remove(archive_path)
return archives
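# Hedged usage sketch (an addition, not in the original file):
#   archives = download_book_crossings(os.path.expanduser('~/scikit_crab_data'))
# returns the list of extracted CSV names, e.g. 'BX-Book-Ratings.csv'.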
def load_bookcrossings(data_home=None, download_if_missing=True,
implicit=False):
"""
Load the filenames of the Book Crossings dataset
data_home: optional, default: None
        Specify the storage folder for the datasets. If None, a 'data/'
        subfolder next to this module is used.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
implicit: optional, False by default
If True, it will load the implicit ratings expressed by rating 0,
otherwise it will load the explicit ratings expressed by rating 1-10.
Examples
--------
>>> from os.path import join
>>> from os.path import dirname
>>> from scikits.crab.datasets.book_crossing import load_bookcrossings
>>> data_home = join(dirname(__file__), 'scikits/crab/datasets/tests/data/')
>>> books = load_bookcrossings(data_home)
>>> len(books.data)
26
>>> len(books.item_ids)
100
"""
if data_home:
if not os.path.exists(data_home):
os.makedirs(data_home)
else:
data_home = join(dirname(__file__), 'data/')
try:
        if not os.path.exists(os.path.join(data_home, 'BX-Book-Ratings.csv')) \
                or not os.path.exists(os.path.join(data_home, 'BX-Books.csv')):
raise IOError
except Exception, e:
print 80 * '_'
print 'Loading files failed'
print 80 * '_'
print e
if download_if_missing:
print 'downloading the dataset...'
try:
download_book_crossings(data_home)
except:
raise Exception('FAIL: Problems during the download.')
print 'dataset downloaded.'
else:
raise IOError('Book-Crossing dataset not found')
#TO FIX: it is not working for np.loadtxt
#ratings_m = np.loadtxt(os.path.join(data_home, 'BX-Book-Ratings.csv'),
# delimiter=';', skiprows=1)
ratings_m = csv.reader(open(os.path.join(data_home,
'BX-Book-Ratings.csv')), delimiter=';')
ratings_m.next()
data_books = {}
if implicit:
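        # In the BX dump a rating of "0" marks an implicit interaction,
        # while explicit ratings range from 1 to 10 (see docstring above).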
for user_id, item_id, rating in ratings_m:
if rating == "0":
data_books.setdefault(user_id, {})
data_books[user_id][item_id] = True
else:
for user_id, item_id, rating in ratings_m:
rating = int(rating)
            if rating != 0:
data_books.setdefault(user_id, {})
data_books[user_id][item_id] = int(rating)
#Read the titles
data_titles = np.loadtxt(os.path.join(data_home, 'BX-Books.csv'),
delimiter=';', usecols=(0, 1), dtype=str)
data_t = []
for item_id, label in data_titles:
data_t.append((item_id, label))
data_titles = dict(data_t)
fdescr = open(dirname(__file__) + '/descr/book-crossing.rst')
return Bunch(data=data_books, item_ids=data_titles,
user_ids=None, DESCR=fdescr.read())
| bsd-3-clause |
harshaneelhg/scikit-learn | sklearn/cluster/birch.py | 206 | 22706 | # Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import NotFittedError, check_is_fitted
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, insted of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
    next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True, in order to retrieve
        the final subclusters.
    init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
        Manipulate ``init_centroids_`` throughout rather than ``centroids_``,
        since the centroids are just a view of ``init_centroids_``.
    init_sq_norm_ : ndarray, shape (branching_factor + 1,)
        Manipulate ``init_sq_norm_`` throughout; similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids and squared norm as views. In this way,
        # updating init_centroids_ and init_sq_norm_ is sufficient;
        # the views stay consistent automatically.
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
            # Things are not too good. We need to redistribute the
            # subclusters in our child node, and add a new subcluster in
            # the parent subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
    A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
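        # Algebraically, sq_radius reduces to SS/n - ||centroid||^2, the
        # mean squared distance of the merged samples to the new centroid.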
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
    centroid closest to the new sample. This is done recursively until it
    ends up at the leaf subcluster whose centroid is closest to the new sample.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be less than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor, then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
    n_clusters : int, instance of sklearn.cluster model, default 3
        Number of clusters after the final clustering step, which treats the
        subclusters from the leaves as new samples. If None, this final
        clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize; enough to convince one to use Cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels: ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
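        # ||x||^2 is constant within each row of the reduced distance, so
        # omitting it does not change the argmin over subcluster centers.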
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
| bsd-3-clause |
tongwang01/tensorflow | tensorflow/contrib/learn/python/learn/estimators/linear_test.py | 4 | 59870 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
def _prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
iris = tf.contrib.learn.datasets.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
iris = tf.contrib.learn.datasets.base.Dataset(data=iris.data[ids],
target=iris.target[ids])
return iris
def _iris_input_fn():
iris = tf.contrib.learn.datasets.load_iris()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[150, 1], dtype=tf.int32)
class LinearClassifierTest(tf.test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.LinearClassifier)
def testTrain(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testJointTrain(self):
"""Tests that loss goes down with training with joint weights."""
def input_fn():
return {
'age': tf.SparseTensor(values=['1'], indices=[[0, 0]], shape=[1, 1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.sparse_column_with_hash_bucket('age', 2)
classifier = tf.contrib.learn.LinearClassifier(
_joint_weight=True,
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [150] instead of [150, 1]."""
def _input_fn():
iris = tf.contrib.learn.datasets.load_iris()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[150], dtype=tf.int32)
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = tf.contrib.learn.datasets.load_iris()
train_x = iris.data
train_y = iris.target
feature_column = tf.contrib.layers.real_valued_column('', dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[100, 1], dtype=tf.int32)
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[100], dtype=tf.int32)
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [tf.contrib.layers.real_valued_column('', dimension=4)]
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=feature_columns)
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testWeightAndBiasNames(self):
"""Tests that weight and bias names haven't changed."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
self.assertEqual(4, len(classifier.weights_))
self.assertEqual(3, len(classifier.bias_))
def testCustomOptimizerByObject(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
optimizer=tf.train.FtrlOptimizer(learning_rate=0.1),
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
  def testCustomOptimizerByFunction(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
def _optimizer():
return tf.train.FtrlOptimizer(learning_rate=0.1)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
optimizer=_optimizer,
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
  def testCustomOptimizerByString(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
optimizer='Ftrl',
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = tf.constant([[1], [0], [0], [0]], dtype=tf.float32)
features = {'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs)}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
predictions = tf.slice(predictions, [0, 1], [-1, 1])
return tf.reduce_sum(tf.mul(predictions, labels))
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[tf.contrib.layers.real_valued_column('x')])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
prediction_key='classes'),
'my_precision': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_precision,
prediction_key='classes'),
'my_metric': MetricSpec(metric_fn=_my_metric_op,
prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric'
]).issubset(set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict(input_fn=predict_input_fn)))
self.assertEqual(_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Tests the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_auc,
prediction_key='bad_type')})
# Tests the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaises(KeyError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): tf.contrib.metrics.streaming_auc})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
tf.contrib.metrics.streaming_accuracy
})
def testLogisticFractionalLabels(self):
"""Tests logistic training with fractional labels."""
def input_fn(num_epochs=None):
return {
'age': tf.train.limit_epochs(
tf.constant([[1], [2]]), num_epochs=num_epochs),
}, tf.constant([[.7], [0]], dtype=tf.float32)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=500)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
# Prediction probabilities mirror the labels column, which proves that the
# classifier learns from float input.
self.assertAllClose([[.3, .7], [1., 0.]], predictions_proba, atol=.1)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
labels = tf.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=2e7)
]
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=sparse_features,
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config=tf.contrib.learn.RunConfig(
num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({})))
classifier.fit(input_fn=_input_fn, steps=200)
loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def input_fn(num_epochs=None):
return {
'age': tf.train.limit_epochs(tf.constant([1]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=['english'], indices=[[0, 0]], shape=[1, 1]),
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
model_dir = tempfile.mkdtemp()
classifier = tf.contrib.learn.LinearClassifier(
model_dir=model_dir,
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=30)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
out1_class = list(classifier.predict(input_fn=predict_input_fn,
as_iterable=True))
out1_proba = list(classifier.predict_proba(input_fn=predict_input_fn,
as_iterable=True))
del classifier
classifier2 = tf.contrib.learn.LinearClassifier(
model_dir=model_dir,
feature_columns=[age, language])
out2_class = list(classifier2.predict(input_fn=predict_input_fn,
as_iterable=True))
out2_proba = list(classifier2.predict_proba(input_fn=predict_input_fn,
as_iterable=True))
self.assertTrue(np.array_equal(out1_class, out2_class))
self.assertTrue(np.array_equal(out1_proba, out2_proba))
def testWeightColumn(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = tf.constant([[1], [1], [1], [1]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = tf.contrib.learn.LinearClassifier(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# All examples in eval data set are y=x.
self.assertGreater(scores['labels/actual_label_mean'], 0.9)
# If there were no weight column, model would learn y=Not(x). Because of
# weights, it learns y=x.
self.assertGreater(scores['labels/prediction_mean'], 0.9)
# All examples in eval data set are y=x. So if weight column were ignored,
# then accuracy would be zero. Because of weights, accuracy should be close
# to 1.0.
self.assertGreater(scores['accuracy'], 0.9)
scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Considering weights, the mean label should be close to 1.0.
# If weights were ignored, it would be 0.25.
self.assertGreater(scores_train_set['labels/actual_label_mean'], 0.9)
# The classifier has learned y=x. If weight column were ignored in
# evaluation, then accuracy for the train set would be 0.25.
# Because weight is not ignored, accuracy is greater than 0.6.
self.assertGreater(scores_train_set['accuracy'], 0.6)
def testWeightColumnLoss(self):
"""Test ensures that you can specify per-example weights for loss."""
def _input_fn():
features = {
'age': tf.constant([[20], [20], [20]]),
'weights': tf.constant([[100], [1], [1]]),
}
labels = tf.constant([[1], [0], [0]])
return features, labels
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age])
classifier.fit(input_fn=_input_fn, steps=100)
loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age],
weight_column_name='weights')
classifier.fit(input_fn=_input_fn, steps=100)
loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss_weighted, loss_unweighted)
def testExport(self):
"""Tests that export model for servo works."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=False)
classifier.fit(input_fn=input_fn, steps=100)
self.assertFalse('centered_bias_weight' in classifier.get_variable_names())
def testEnableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=True)
classifier.fit(input_fn=input_fn, steps=100)
self.assertTrue('centered_bias_weight' in classifier.get_variable_names())
def testTrainOptimizerWithL1Reg(self):
"""Tests l1 regularized model has higher loss."""
def input_fn():
return {
'language': tf.SparseTensor(values=['hindi'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
classifier_no_reg = tf.contrib.learn.LinearClassifier(
feature_columns=[language])
classifier_with_reg = tf.contrib.learn.LinearClassifier(
feature_columns=[language],
optimizer=tf.train.FtrlOptimizer(learning_rate=1.0,
l1_regularization_strength=100.))
loss_no_reg = classifier_no_reg.fit(
input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
loss_with_reg = classifier_with_reg.fit(
input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
self.assertLess(loss_no_reg, loss_with_reg)
def testTrainWithMissingFeature(self):
"""Tests that training works with missing features."""
def input_fn():
return {
'language': tf.SparseTensor(values=['Swahili', 'turkish'],
indices=[[0, 0], [2, 0]],
shape=[3, 1])
}, tf.constant([[1], [1], [1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
classifier = tf.contrib.learn.LinearClassifier(feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testSdcaOptimizerRealValuedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and real valued features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2']),
'maintenance_cost': tf.constant([[500.0], [200.0]]),
'sq_footage': tf.constant([[800.0], [600.0]]),
'weights': tf.constant([[1.0], [1.0]])
}, tf.constant([[0], [1]])
maintenance_cost = tf.contrib.layers.real_valued_column('maintenance_cost')
sq_footage = tf.contrib.layers.real_valued_column('sq_footage')
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[maintenance_cost, sq_footage],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):
"""Tests SDCAOptimizer with real valued features of higher dimension."""
    # input_fn is almost identical to the one in
    # testSdcaOptimizerRealValuedFeatures, except that the two 1-dimensional
    # dense features are replaced by a single 2-dimensional feature.
def input_fn():
return {
'example_id': tf.constant(['1', '2']),
'dense_feature': tf.constant([[500.0, 800.0], [200.0, 600.0]])
}, tf.constant([[0], [1]])
dense_feature = tf.contrib.layers.real_valued_column(
'dense_feature', dimension=2)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[dense_feature], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerBucketizedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and bucketized features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[600.0], [1000.0], [400.0]]),
'sq_footage': tf.constant([[1000.0], [600.0], [700.0]]),
'weights': tf.constant([[1.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('sq_footage'),
boundaries=[650.0])
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id',
symmetric_l2_regularization=1.0)
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[price_bucket, sq_footage_bucket],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerSparseFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.4], [0.6], [0.3]]),
'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[1.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price = tf.contrib.layers.real_valued_column('price')
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeatures(self):
"""LinearClasssifier with SDCAOptimizer and weighted sparse features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.SparseTensor(values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 5]),
'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 5])
}, tf.constant([[1], [0], [1]])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_weighted_by_price = tf.contrib.layers.weighted_sparse_column(
country, 'price')
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[country_weighted_by_price],
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerCrossedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and crossed features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'language': tf.SparseTensor(values=['english', 'italian', 'spanish'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1]),
'country': tf.SparseTensor(values=['US', 'IT', 'MX'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])
}, tf.constant([[0], [0], [1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=5)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_language = tf.contrib.layers.crossed_column(
[language, country], hash_bucket_size=10)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[country_language],
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=10)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerMixedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.6], [0.8], [0.3]]),
'sq_footage': tf.constant([[900.0], [700.0], [600.0]]),
'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[3.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price = tf.contrib.layers.real_valued_column('price')
sq_footage_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = tf.contrib.layers.crossed_column(
[sq_footage_bucket, country],
hash_bucket_size=10)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testEval(self):
"""Tests that eval produces correct metrics.
"""
def input_fn():
return {
'age': tf.constant([[1], [2]]),
'language': tf.SparseTensor(values=['greek', 'chinese'],
indices=[[0, 0], [1, 0]],
shape=[2, 1]),
}, tf.constant([[1], [0]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language])
# Evaluate on trained model
classifier.fit(input_fn=input_fn, steps=100)
classifier.evaluate(input_fn=input_fn, steps=1)
# TODO(ispir): Enable accuracy check after resolving the randomness issue.
# self.assertLess(evaluated_values['loss/mean'], 0.3)
# self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(tf.test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.LinearRegressor)
def testRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[10.]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearRegressor(
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=cont_features,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_iris_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_iris_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
}
return features, labels
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
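    # (A sanity check of that arithmetic, assuming the model converges to the
    # label mean 0.25: the single y=1 row has residual 0.75 and the three y=0
    # rows have residual 0.25 each.)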
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = tf.contrib.learn.LinearRegressor(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
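    # (Eval weights sum to 7 + 1 + 1 + 1 = 10; the first row's squared error
    # 0.75^2 = 0.5625 is scaled by 7 and each 0.25^2 = 0.0625 by 1, giving
    # (3.9375 + 0.1875) / 10 = 0.4125.)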
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = tf.constant([[1.], [1.], [1.], [1.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = tf.contrib.learn.LinearRegressor(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.1)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant(labels, dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predictions, atol=0.1)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant(labels, dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predictions, atol=0.1)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs)}
return features, labels
def _my_metric_op(predictions, labels):
return tf.reduce_sum(tf.mul(predictions, labels))
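    # MetricSpec (used below) wires the regressor's 'scores' prediction tensor
    # into each metric_fn; a custom metric can be any callable mapping
    # (predictions, labels) to a metric tensor, like _my_metric_op above.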
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric': MetricSpec(metric_fn=_my_metric_op,
prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_auc,
prediction_key='bad_type')})
# Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={('my_error', 'predictions'
): tf.contrib.metrics.streaming_mean_squared_error})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
tf.contrib.metrics.streaming_mean_squared_error
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = tf.contrib.learn.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict(input_fn=predict_input_fn))
del regressor
regressor2 = tf.contrib.learn.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns)
predictions2 = list(regressor2.predict(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)
feature_columns = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=2e7),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config=tf.contrib.learn.RunConfig(
num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({}),
tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
enable_centered_bias=False,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testRecoverWeights(self):
rng = np.random.RandomState(67)
n = 1000
n_weights = 10
bias = 2
x = rng.uniform(-1, 1, (n, n_weights))
weights = 10 * rng.randn(n_weights)
y = np.dot(x, weights)
y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(x)
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
optimizer=tf.train.FtrlOptimizer(learning_rate=0.8))
regressor.fit(x, y, batch_size=64, steps=2000)
# Have to flatten weights since they come in (x, 1) shape.
self.assertAllClose(weights, regressor.weights_.flatten(), rtol=1)
# TODO(ispir): Disable centered_bias.
# assert abs(bias - regressor.bias_) < 0.1
def testSdcaOptimizerRealValuedLinearFeatures(self):
"""Tests LinearRegressor with SDCAOptimizer and real valued features."""
x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
weights = [[3.0], [-1.2], [0.5]]
y = np.dot(x, weights)
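    # The labels are an exact linear function of x, so SDCA should drive the
    # squared loss close to zero and recover the generating weights (checked
    # below with rtol=0.1).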
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'x': tf.constant(x),
'weights': tf.constant([[10.0], [10.0], [10.0]])
}, tf.constant(y)
x_column = tf.contrib.layers.real_valued_column('x', dimension=3)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[x_column],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.01)
self.assertAllClose([w[0] for w in weights],
regressor.weights_.flatten(), rtol=0.1)
def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):
"""Tests LinearRegressor with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.6], [0.8], [0.3]]),
'sq_footage': tf.constant([[900.0], [700.0], [600.0]]),
'country': tf.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[3.0], [5.0], [7.0]])
}, tf.constant([[1.55], [-1.25], [-3.0]])
price = tf.contrib.layers.real_valued_column('price')
sq_footage_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = tf.contrib.layers.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
"""Tests LinearClasssifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.4], [0.6], [0.3]]),
'country': tf.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[10.0], [10.0], [10.0]])
}, tf.constant([[1.4], [-0.8], [2.6]])
price = tf.contrib.layers.real_valued_column('price')
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
# Regressor with no L1 regularization.
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
no_l1_reg_weights = regressor.weights_
# Regressor with L1 regularization.
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id', symmetric_l1_regularization=1.0)
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
l1_reg_weights = regressor.weights_
# Unregularized loss is lower when there is no L1 regularization.
self.assertLess(no_l1_reg_loss, l1_reg_loss)
self.assertLess(no_l1_reg_loss, 0.05)
# But weights returned by the regressor with L1 regularization have smaller
# L1 norm.
l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
for var_name in sorted(l1_reg_weights):
l1_reg_weights_norm += sum(
np.absolute(l1_reg_weights[var_name].flatten()))
no_l1_reg_weights_norm += sum(
np.absolute(no_l1_reg_weights[var_name].flatten()))
print('Var name: %s, value: %s' %
(var_name, no_l1_reg_weights[var_name].flatten()))
self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
def testSdcaOptimizerBiasOnly(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when it's the only feature present.
      All of the instances in this input only have the bias feature, and
      1/4 of the labels are positive. This means that the expected weight for
      the bias should be close to the average prediction, i.e. 0.25.
Returns:
Training data for the test.
"""
num_examples = 40
return {
'example_id': tf.constant([str(x+1) for x in range(num_examples)]),
# place_holder is an empty column which is always 0 (absent), because
        # LinearRegressor requires at least one feature column.
        'place_holder': tf.constant([[0.0]]*num_examples),
    }, tf.constant([[1 if i % 4 == 0 else 0] for i in range(num_examples)])
place_holder = tf.contrib.layers.real_valued_column('place_holder')
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[place_holder],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(regressor.get_variable_value('linear/bias_weight')[0],
0.25, err=0.1)
def testSdcaOptimizerBiasAndOtherColumns(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.4 of all instances that have feature 'a' are positive, and 0.2 of all
instances that have feature 'b' are positive. The labels in the dataset
are ordered to appear shuffled since SDCA expects shuffled data, and
converges faster with this pseudo-random ordering.
If the bias was centered we would expect the weights to be:
bias: 0.3
a: 0.1
b: -0.1
      Until b/29339026 is resolved, the bias gets regularized with the same
      global value as the other columns, and so the expected weights are
      shifted to:
bias: 0.2
a: 0.2
b: 0.0
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples/2)
return {
'example_id': tf.constant([str(x+1) for x in range(num_examples)]),
'a': tf.constant([[1]]*int(half) + [[0]]*int(half)),
'b': tf.constant([[0]]*int(half) + [[1]]*int(half)),
}, tf.constant([[x] for x in
[1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half/10) +
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half/10)])
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('a'),
tf.contrib.layers.real_valued_column('b')],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=200)
# TODO(b/29339026): Change the expected results to expect a centered bias.
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
self.assertNear(regressor.weights_['linear/a/weight'][0], 0.2, err=0.05)
self.assertNear(regressor.weights_['linear/b/weight'][0], 0.0, err=0.05)
def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
all instances that have feature 'b' have a label of -1.
We can expect the weights to be:
bias: 0.0
a: 0.1
b: -0.1
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples/2)
return {
'example_id': tf.constant([str(x+1) for x in range(num_examples)]),
'a': tf.constant([[1]]*int(half) + [[0]]*int(half)),
'b': tf.constant([[0]]*int(half) + [[1]]*int(half)),
}, tf.constant([[1 if x%10 == 0 else 0] for x in range(half)] +
[[-1 if x%10 == 0 else 0] for x in range(half)])
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('a'),
tf.contrib.layers.real_valued_column('b')],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
self.assertNear(regressor.weights_['linear/a/weight'][0], 0.1, err=0.05)
self.assertNear(regressor.weights_['linear/b/weight'][0], -0.1, err=0.05)
def boston_input_fn():
boston = tf.contrib.learn.datasets.load_boston()
features = tf.cast(tf.reshape(tf.constant(boston.data), [-1, 13]), tf.float32)
labels = tf.cast(tf.reshape(tf.constant(boston.target), [-1, 1]), tf.float32)
return features, labels
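# Note (assumption about the helper used below):
# infer_real_valued_columns_from_input_fn runs the input_fn once and infers a
# real-valued feature column per feature tensor, here a single
# 13-dimensional column for the Boston data.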
class FeatureColumnTest(tf.test.TestCase):
def testTrain(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = tf.contrib.learn.LinearRegressor(feature_columns=feature_columns)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
edhuckle/statsmodels | tools/hash_funcs.py | 27 | 1305 | """
A collection of utilities to see if new ReST files need to be automatically
generated from certain files in the project (examples, datasets).
"""
import os
from statsmodels.compat import cPickle
file_path = os.path.dirname(__file__)
def get_hash(f):
"""
    Gets the hexadecimal md5 hash of a string
"""
import hashlib
m = hashlib.md5()
m.update(f)
return m.hexdigest()
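# Illustrative usage: the md5 of the string "hello" is the well-known digest
#
#     >>> get_hash("hello")
#     '5d41402abc4b2a76b9719d911017c592'
#
# (Under Python 3, hashlib requires bytes, so the argument would need to be
# encoded first, e.g. m.update(f.encode("utf-8")).)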
def update_hash_dict(filehash, filename):
"""
Opens the pickled hash dictionary, adds an entry, and dumps it back.
"""
try:
with open(file_path+'/hash_dict.pickle','r') as f:
hash_dict = cPickle.load(f)
    except IOError:
hash_dict = {}
hash_dict.update({filename : filehash})
with open(os.path.join(file_path,'hash_dict.pickle'),'w') as f:
cPickle.dump(hash_dict, f)
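# Typical round trip (a sketch of the intended use, not from the original
# module): check_hash(rawfile, filename) returns (True, new_hash) when the
# file changed or is unseen; the caller regenerates the ReST output and then
# calls update_hash_dict(new_hash, filename) so the next run returns
# (False, None).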
def check_hash(rawfile, filename):
"""
Returns True if hash does not match the previous one.
"""
try:
with open(file_path+'/hash_dict.pickle','r') as f:
hash_dict = cPickle.load(f)
    except IOError:
hash_dict = {}
try:
checkhash = hash_dict[filename]
    except KeyError:
checkhash = None
filehash = get_hash(rawfile)
if filehash == checkhash:
return False, None
return True, filehash
| bsd-3-clause |
harshaneelhg/scikit-learn | benchmarks/bench_covertype.py | 153 | 7296 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
    Classifier     train-time   test-time   error-rate
    ---------------------------------------------------
    liblinear        15.9744s     0.0705s       0.2305
    GaussianNB        3.0666s     0.3884s       0.4841
    SGD               1.0558s     0.1152s       0.2300
    CART             79.4296s     0.0523s       0.0469
    RandomForest   1190.1620s     0.5881s       0.0243
    ExtraTrees      640.3194s     0.6495s       0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
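    # (Columns 10+ are the one-hot wilderness-area and soil-type indicators,
    # so only the first 10 numerical features are actually standardized.)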
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3)
}
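# Note: LinearSVC's loss="l2" above is the older spelling of what later
# scikit-learn releases call loss="squared_hinge".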
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
| bsd-3-clause |
justincassidy/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 263 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
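    # shrink_threshold soft-thresholds each class centroid toward the overall
    # data centroid (nearest shrunken centroids), which can zero out noisy
    # features; shrinkage=None disables this.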
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
Lawrence-Liu/scikit-learn | sklearn/utils/tests/test_class_weight.py | 139 | 11909 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
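    # (With "balanced", each class weight is n_samples / (n_classes * count):
    # here 6 / (3 * 3), 6 / (3 * 2) and 6 / (3 * 1), i.e. [0.667, 1.0, 2.0],
    # which is why np.dot(cw, class_counts) recovers y.shape[0] == 6.)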
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
    # duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
justincassidy/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 205 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
A :ref:`decision tree <tree>`
is used to simultaneously predict the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and fit the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
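# (Every 5th of the 100 samples, i.e. 20 rows, gets uniform noise in
# (-0.5, 0.5] added to both targets.)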
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |