repo_name (stringlengths 7-84) | path (stringlengths 5-184) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 978-477k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
ryfeus/lambda-packs | Tensorflow_OpenCV_Nightly/source/tensorflow/python/estimator/inputs/queues/feeding_functions.py | 46 | 15782 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import types as tp
import numpy as np
import six
from tensorflow.python.estimator.inputs.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _get_integer_indices_for_next_batch(
batch_indices_start, batch_size, epoch_end, array_length,
current_epoch, total_epochs):
"""Returns the integer indices for next batch.
If total epochs is not None and current epoch is the final epoch, the end
index of the next batch should not exceed the `epoch_end` (i.e., the final
batch might not have size `batch_size` to avoid overshooting the last epoch).
Args:
batch_indices_start: Integer, the index to start next batch.
batch_size: Integer, size of batches to return.
epoch_end: Integer, the end index of the epoch. The epoch could start from a
random position, so `epoch_end` provides the end index for that.
array_length: Integer, the length of the array.
current_epoch: Integer, the number of epochs that have been emitted so far.
total_epochs: Integer or `None`, the total number of epochs to emit. If
`None` will run forever.
Returns:
A tuple of a list with integer indices for next batch and `current_epoch`
value after the next batch.
Raises:
OutOfRangeError if `current_epoch` is not less than `total_epochs`.
"""
if total_epochs is not None and current_epoch >= total_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % current_epoch)
batch_indices_end = batch_indices_start + batch_size
batch_indices = [j % array_length for j in
range(batch_indices_start, batch_indices_end)]
epoch_end_indices = [i for i, x in enumerate(batch_indices) if x == epoch_end]
current_epoch += len(epoch_end_indices)
if total_epochs is None or current_epoch < total_epochs:
return (batch_indices, current_epoch)
# We might have emitted more data than the requested number of epochs; trim the excess.
final_epoch_end_inclusive = epoch_end_indices[
-(current_epoch - total_epochs + 1)]
batch_indices = batch_indices[:final_epoch_end_inclusive + 1]
return (batch_indices, total_epochs)
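# Editor's sketch (not part of the original module): a standalone, pure-Python
# illustration of the wrap-around indexing computed above. The helper name
# `_demo_wraparound_indices` is hypothetical and is never called by this file.
def _demo_wraparound_indices():
  array_length, epoch_end = 5, 4     # epoch started at index 0, so it ends at 4
  start, batch_size = 3, 4
  indices = [j % array_length for j in range(start, start + batch_size)]
  # indices == [3, 4, 0, 1]; the appearance of `epoch_end` (4) closes one
  # epoch, so the caller increments `current_epoch` and, on the final epoch,
  # trims everything after that position.
  return indices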
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
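# Editor's sketch (not part of the original module): how a feed function is
# consumed. Placeholders are only used as feed_dict keys, so plain strings
# stand in for real tf.placeholder tensors here; the names are hypothetical.
def _demo_array_feed_fn():
  feed_fn = _ArrayFeedFn(["index_ph", "value_ph"], np.arange(10),
                         batch_size=4, num_epochs=1)
  # Each call yields one feed_dict; OutOfRangeError is raised once the
  # requested number of epochs has been emitted.
  return feed_fn()  # {"index_ph": [0, 1, 2, 3], "value_ph": array([0, 1, 2, 3])}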
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays) + 1, len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
self._max = len(next(iter(ordered_dict_of_arrays.values())))
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [
column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()
]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns) + 1, len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
class _GeneratorFeedFn(object):
"""Creates feed dictionaries from `Generator` of `dicts` of numpy arrays."""
def __init__(self,
placeholders,
generator,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
first_sample = next(generator())
if len(placeholders) != len(first_sample):
raise ValueError("Expected {} placeholders; got {}.".format(
len(first_sample), len(placeholders)))
self._keys = sorted(list(first_sample.keys()))
self._col_placeholders = placeholders
self._generator_function = generator
self._iterator = generator()
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
list_dict = {}
list_dict_size = 0
while list_dict_size < self._batch_size:
try:
data_row = next(self._iterator)
except StopIteration:
self._epoch += 1
self._iterator = self._generator_function()
data_row = next(self._iterator)
for index, key in enumerate(self._keys):
if key not in data_row.keys():
raise KeyError("key mismatch between dicts emitted by GenFun. "
"Expected {} keys; got {}".format(
self._keys, data_row.keys()))
list_dict.setdefault(self._col_placeholders[index],
list()).append(data_row[key])
list_dict_size += 1
feed_dict = {key: np.asarray(item) for key, item in list(list_dict.items())}
return feed_dict
def _enqueue_data(data,
capacity,
shuffle=False,
min_after_dequeue=None,
num_threads=1,
seed=None,
name="enqueue_input",
enqueue_size=1,
num_epochs=None):
"""Creates a queue filled from a numpy array or pandas `DataFrame`.
Returns a queue filled with the rows of the given (`OrderedDict` of) array
or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
`Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
numpy arrays, the first enqueued `Tensor` contains the row number.
Args:
data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or a generator
yielding `dict`s of numpy arrays or pandas `DataFrame` that will be read
into the queue.
capacity: the capacity of the queue.
shuffle: whether or not to shuffle the rows of the array.
min_after_dequeue: minimum number of elements that can remain in the queue
after a dequeue operation. Only used when `shuffle` is true. If not set,
defaults to `capacity` / 4.
num_threads: number of threads used for reading and enqueueing.
seed: used to seed shuffling and reader starting points.
name: a scope name identifying the data.
enqueue_size: the number of rows to enqueue per step.
num_epochs: limit enqueuing to a specified number of epochs, if provided.
Returns:
A queue filled with the rows of the given (`OrderedDict` of) array or
`DataFrame`.
Raises:
TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
arrays, a numpy `ndarray`, or a generator producing these.
"""
with ops.name_scope(name):
if isinstance(data, np.ndarray):
types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
queue_shapes = [(), data.shape[1:]]
get_feed_fn = _ArrayFeedFn
elif isinstance(data, collections.OrderedDict):
types = [dtypes.int64] + [
dtypes.as_dtype(col.dtype) for col in data.values()
]
queue_shapes = [()] + [col.shape[1:] for col in data.values()]
get_feed_fn = _OrderedDictNumpyFeedFn
elif isinstance(data, tp.FunctionType):
x_first_el = six.next(data())
x_first_keys = sorted(x_first_el.keys())
x_first_values = [x_first_el[key] for key in x_first_keys]
types = [dtypes.as_dtype(col.dtype) for col in x_first_values]
queue_shapes = [col.shape for col in x_first_values]
get_feed_fn = _GeneratorFeedFn
elif HAS_PANDAS and isinstance(data, pd.DataFrame):
types = [
dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
]
queue_shapes = [() for _ in types]
get_feed_fn = _PandasFeedFn
else:
raise TypeError(
"data must be either a numpy array or pandas DataFrame if pandas is "
"installed; got {}".format(type(data).__name__))
# TODO(jamieas): TensorBoard warnings for all warnings below once available.
if num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with num_epochs and num_threads > 1. "
"num_epochs is applied per thread, so this will produce more "
"epochs than you probably intend. "
"If you want to limit epochs, use one thread.")
if shuffle and num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with shuffle=True, num_threads > 1, and "
"num_epochs. This will create multiple threads, all reading the "
"array/dataframe in order adding to the same shuffling queue; the "
"results will likely not be sufficiently shuffled.")
if not shuffle and num_threads > 1:
logging.warning(
"enqueue_data was called with shuffle=False and num_threads > 1. "
"This will create multiple threads, all reading the "
"array/dataframe in order. If you want examples read in order, use"
" one thread; if you want multiple threads, enable shuffling.")
if shuffle:
min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
min_after_dequeue)
queue = data_flow_ops.RandomShuffleQueue(
capacity,
min_after_dequeue,
dtypes=types,
shapes=queue_shapes,
seed=seed)
else:
min_after_dequeue = 0 # just for the summary text
queue = data_flow_ops.FIFOQueue(
capacity, dtypes=types, shapes=queue_shapes)
enqueue_ops = []
feed_fns = []
for i in range(num_threads):
# Note the placeholders have no shapes, so they will accept any
# enqueue_size. enqueue_many below will break them up.
placeholders = [array_ops.placeholder(t) for t in types]
enqueue_ops.append(queue.enqueue_many(placeholders))
seed_i = None if seed is None else (i + 1) * seed
feed_fns.append(
get_feed_fn(
placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs))
runner = fqr._FeedingQueueRunner( # pylint: disable=protected-access
queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns)
queue_runner.add_queue_runner(runner)
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) * (1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
(queue.name, min_after_dequeue,
capacity - min_after_dequeue))
summary.scalar(summary_name, full)
return queue
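# Editor's sketch (not part of the original module): typical use of
# _enqueue_data with a pandas DataFrame in a TF 1.x graph. The dequeued
# tensors come back as (index, column, column, ...); names are hypothetical.
def _demo_enqueue_data():
  df = pd.DataFrame({"a": np.arange(8), "b": np.arange(8) * 2.0})
  queue = _enqueue_data(df, capacity=32, shuffle=False, num_epochs=1)
  index, a, b = queue.dequeue_many(4)  # Tensors; evaluate inside a session
  return index, a, b                   # with tf.train.start_queue_runners().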
| mit |
ddf-project/DDF | python/ddf/util.py | 3 | 5533 | from __future__ import unicode_literals
import numpy as np
import json
import pandas as pd
from py4j import java_collections
"""
Mapping from DDF types to python types
"""
TYPE_MAPPING = {'integer': int,
'int': int,
'tinyint': int,
'smallint': int,
'bigint': long,
'long': long,
'double': float,
'float': float,
# TODO: Find a way to handle decimal better.
'decimal': float,
'boolean': bool,
'logical': bool,
'string': unicode,
'array': list,
'struct': object,
'timestamp': int,
'blob': object
}
NUMERIC_DDF_TYPES = ['int', 'tinyint', 'smallint', 'bigint', 'integer', 'double', 'float', 'decimal']
def is_numeric_ddf_type(t):
"""
Check if column type t is a numeric type in DDF's set of types
:param t: column type
:return: True/False
"""
return t.lower() in NUMERIC_DDF_TYPES
def to_bool(x):
"""
Try our best to make x into a boolean value
:param x: the value to be converted
:return: a boolean value
"""
if x is None:
return None
if isinstance(x, (bool, np.bool_)):
return x
if isinstance(x, (str, unicode)):
return x.lower() in ['yes', 'true']
if isinstance(x, (int, long)):
return x != 0
if isinstance(x, (float, np.float)):
return abs(x) > 1e-10
raise ValueError('Could not convert into bool with value {} of type {}'.format(x, type(x)))
def to_python_type(t):
tmp = t.split(".")
t = tmp[len(tmp) - 1]
return 'str' if t not in TYPE_MAPPING else TYPE_MAPPING[t]
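# Editor's sketch (not part of the original module): to_python_type() keeps
# only the last dot-separated token, so fully qualified DDF type names map the
# same way as bare ones, and unknown types fall back to the string 'str'.
def _demo_to_python_type():
    assert to_python_type('int') is int
    assert to_python_type('some.namespace.double') is float
    assert to_python_type('not_a_known_type') == 'str'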
def convert_column_types(df, column_types, raise_on_error=False):
"""
Convert a dataframe into data types specified in column_types
:param df: a data frame containing the sampled data
:type df: pd.DataFrame
:param column_types: the types of columns, in pE terminology
:param raise_on_error:
:return: a correctly typed data frame
"""
if len(df.columns) != len(column_types):
raise ValueError('Expect a list of column types of the same length with the number of columns in df')
for i, c in enumerate(df.columns):
dest_type = to_python_type(column_types[i])
if dest_type is bool:
df[c] = df[c].apply(to_bool)
elif dest_type is list or column_types[i] == 'struct':
# json
df[c] = df[c].apply(lambda x: json.loads(x) if isinstance(x, (str, unicode)) else x)
elif dest_type is float:
# NaN is encoded as "null"
df[c] = df[c].convert_objects(convert_numeric=True)
# convert type
df[c] = df[c].astype(dest_type, raise_on_error=raise_on_error)
return df
"""
Validating column arguments
"""
def validate_column_generic(col_names, column, get_name=True):
"""
Validate a column name or index, return the column name
:param col_names: list of column names
:param column: column name or index
:param get_name:
:return: column index
"""
if type(column) is int:
if column < 0 or column >= len(col_names):
raise ValueError('Column index out of range: {}'.format(column))
return col_names[column] if get_name else column
elif isinstance(column, (str, unicode)) and column in col_names:
return column if get_name else col_names.index(column)
raise ValueError('Column not found: {}'.format(column))
def parse_column_str(col_names, column):
"""
Validate a column name or index, return the column name
:param col_names: list of column names
:param column: column name or index
:return: column index
"""
return validate_column_generic(col_names, column, True)
def parse_column(col_names, column):
"""
Convert a column to index
:param col_names: list of column names
:param column: column name or index
:return: column index
"""
return validate_column_generic(col_names, column, False)
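# Editor's sketch (not part of the original module): the two wrappers above
# resolve a column given either its name or its index against the same list.
def _demo_parse_column():
    cols = ['id', 'name', 'price']
    assert parse_column_str(cols, 1) == 'name'   # index -> name
    assert parse_column(cols, 'price') == 2      # name  -> index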
def to_java_array(ls, java_type, gateway_client):
"""
Convert python iterable into a java array
:param ls:
:param java_type:
:param gateway_client:
:return:
"""
arr = gateway_client.new_array(java_type, len(ls))
for i in range(0, len(ls)):
arr[i] = ls[i]
return arr
def to_java_list(ls, gateway_client):
"""
Convert a python list into java list
:param ls: python list to be converted
:param gateway_client: gateway client object
:return: java list
"""
return java_collections.ListConverter().convert([] if ls is None else ls, gateway_client._gateway_client)
def parse_ddf_data(rows, colnames, coltypes):
n = len(rows)
data = dict([(c, [None] * n) for c in colnames])
nulls = ("null", "NULL")
for i in range(0, n):
row = str(rows[i]).split('\t')
for j, c in enumerate(colnames):
value = row[j].replace('\\\\t', '\t')
if value not in nulls:
data[c][i] = value
return convert_column_types(pd.DataFrame(data=data, columns=colnames), coltypes, raise_on_error=False)
def parse_sql_result(java_result):
rows = java_result.getRows()
cols = java_result.getSchema().getColumns()
coltypes = [str(x.getType().toString()) for x in cols]
colnames = [str(x.getName()) for x in cols]
return parse_ddf_data(rows, colnames, coltypes)
| apache-2.0 |
Ledoux/ShareYourSystem | Pythonlogy/ShareYourSystem/Specials/Predicters/Predicter/draft/__init__ copy 2.py | 4 | 6766 | # -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
import types
BaseModuleStr="ShareYourSystem.Standards.Controllers.Systemer"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
SYS.addDo('Predicter','Predict','Predicting','Predicted')
#</DefineAugmentation>
#<ImportSpecificModules>
import scipy.stats
import numpy as np
from matplotlib import pyplot
#</ImportSpecificModules>
#<DefineLocals>
def getNullFloatsArray(_FloatsArray, _RtolFloat=1e-5):
u, s, v = np.linalg.svd(_FloatsArray)
RankInt = (s > _RtolFloat*s[0]).sum()
return v[RankInt:].T.copy()
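#Editor's sketch (not part of the original module): getNullFloatsArray returns
#a basis of the right null space, so the product with the input matrix is
#numerically zero. The demo name is hypothetical and never called.
def _demoGetNullFloatsArray():
    _DemoFloatsArray = np.array([[1., 0., 0.], [0., 1., 0.]])
    _DemoNullFloatsArray = getNullFloatsArray(_DemoFloatsArray)
    #shape (3, 1); its single column spans the kernel (the z-axis here)
    return np.allclose(np.dot(_DemoFloatsArray, _DemoNullFloatsArray), 0.)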
#</DefineLocals>
#<DefineClass>
@DecorationClass()
class PredicterClass(BaseClass):
def default_init(self,
_PredictingUnitsInt=0,
_PredictingSensorsInt=0,
_PredictingConstantTimeFloat=0.01,
_PredictingDecoderWeigtFloat=1.,
_PredictingNormalisationInt=1,
_PredictingCostFloat=1.,
_PredictingPerturbativeInputWeightFloat=0.1,
_PredictingPerturbativeLateralWeightFloat=0.1,
_PredictingInputStatStr='norm',
_PredictingInputRandomStatStr='norm',
_PredictingLateralRandomStatStr='norm',
_PredictedSensorJacobianFloatsArray=None,
_PredictedControlDecoderWeigthFloatsArray=None,
_PredictedExactDecoderWeigthFloatsArray=None,
_PredictedLeakWeigthFloatsArray=None,
_PredictedInputRandomFloatsArray=None,
_PredictedPerturbativeInputWeigthFloatsArray=None,
_PredictedNullFloatsArray=None,
_PredictedTotalPerturbativeInputWeigthFloatsArray=None,
_PredictedExactLateralWeigthFloatsArray=None,
_PredictedLeakExactLateralWeigthFloatsArray=None,
_PredictedLateralRandomFloatsArray=None,
_PredictedPerturbativeLateralWeigthFloatsArray=None,
_PredictedTotalPerturbativeLateralWeigthFloatsArray=None,
**_KwargVariablesDict
):
""" """
#Call the parent init method
BaseClass.__init__(self,**_KwargVariablesDict)
def do_predict(self):
#/#################/#
# Sensor care : Prepare the input weigth and the null matrix
#
self.PredictedSensorJacobianFloatsArray=-np.diag(
(1./self.PredictingConstantTimeFloat)*np.ones(
self.PredictingSensorsInt
)
)
#debug
'''
self.debug(
[
'We have prepared the sensor jacobian',
('self.',self,['PredictedSensorJacobianFloatsArray'])
]
)
'''
#/#################/#
# Prepare the Decoders weigths
#
#Perturbative and exact
#random
self.PredictedExactDecoderWeigthFloatsArray=self.PredictingDecoderWeigtFloat*getattr(
scipy.stats,
self.PredictingInputRandomStatStr
).rvs(
size=(
self.PredictingSensorsInt,
self.PredictingUnitsInt
)
)/(self.PredictingUnitsInt**self.PredictingNormalisationInt)
#find the null space
self.PredictedNullFloatsArray=getNullFloatsArray(
self.PredictedExactDecoderWeigthFloatsArray
)
#debug
'''
PredictedProductArray=np.dot(
self.PredictedExactDecoderWeigthFloatsArray,
self.PredictedNullFloatsArray
)
self.debug(
[
('self.',self,[
'PredictedExactDecoderWeigthFloatsArray',
'PredictingUnitsInt'
]
),
("locals()['",locals(),['PredictedProductArray'],"']")
]
)
'''
#Control
#pinv
self.PredictedControlDecoderWeigthFloatsArray=np.linalg.pinv(
self.PredictedExactDecoderWeigthFloatsArray.T
)
#debug
'''
PredictedPinvFloatsArray=np.dot(
self.PredictedControlDecoderWeigthFloatsArray,
self.PredictedExactDecoderWeigthFloatsArray.T
)
self.debug(
[
'PredictedPinvFloatsArray is ',
str(PredictedPinvFloatsArray)
]
)
'''
#/#################/#
# Build the perturbative random matrices
#
#random
self.PredictedInputRandomFloatsArray=self.PredictingPerturbativeInputWeightFloat*getattr(
scipy.stats,
self.PredictingInputRandomStatStr
).rvs(
size=(
np.shape(self.PredictedNullFloatsArray)[1],
self.PredictingSensorsInt
)
)
#dot
self.PredictedPerturbativeInputWeigthFloatsArray=np.dot(
self.PredictedNullFloatsArray,
self.PredictedInputRandomFloatsArray
)
#/#################/#
# Build all the perturbative input
#
#sum
self.PredictedTotalPerturbativeInputWeigthFloatsArray=self.PredictedExactDecoderWeigthFloatsArray.T+self.PredictedPerturbativeInputWeigthFloatsArray
#/#################/#
# Build all the perturbative input
#
self.PredictedLeakWeigthFloatsArray=np.diag(np.ones(self.PredictingUnitsInt))
#/#################/#
# Build all the possible lateral connectivities
#
#Exact
#dot
self.PredictedExactLateralWeigthFloatsArray=np.dot(
self.PredictedExactDecoderWeigthFloatsArray.T,
self.PredictedExactDecoderWeigthFloatsArray
)
#add the leaky part to compensate
self.PredictedLeakExactLateralWeigthFloatsArray=self.PredictedExactLateralWeigthFloatsArray-(
1.-self.PredictingCostFloat)*np.diag(
np.ones(self.PredictingUnitsInt)
)
#Perturbative
#random
self.PredictedLateralRandomFloatsArray=self.PredictingPerturbativeLateralWeightFloat*getattr(
scipy.stats,
self.PredictingLateralRandomStatStr
).rvs(
size=(
np.shape(self.PredictedNullFloatsArray)[1],
self.PredictingUnitsInt
)
)
#dot
self.PredictedPerturbativeLateralWeigthFloatsArray=np.dot(
self.PredictedNullFloatsArray,
self.PredictedLateralRandomFloatsArray
)
#sum
self.PredictedTotalPerturbativeLateralWeigthFloatsArray=self.PredictedLeakExactLateralWeigthFloatsArray+self.PredictedPerturbativeLateralWeigthFloatsArray
#</DefineClass>
#</DefinePrint>
PredicterClass.PrintingClassSkipKeyStrsList.extend(
[
'PredictingUnitsInt',
'PredictingSensorsInt',
'PredictingConstantTimeFloat',
'PredictingDecoderWeigtFloat',
'PredictingNormalisationInt',
'PredictingCostFloat',
'PredictingPerturbativeInputWeightFloat',
'PredictingPerturbativeLateralWeightFloat',
'PredictingInputStatStr',
'PredictingInputRandomStatStr',
'PredictingLateralRandomStatStr',
'PredictedSensorJacobianFloatsArray',
'PredictedLeakWeigthFloatsArray',
'PredictedControlDecoderWeigthFloatsArray',
'PredictedExactDecoderWeigthFloatsArray',
'PredictedInputRandomFloatsArray',
'PredictedPerturbativeInputWeigthFloatsArray',
'PredictedNullFloatsArray',
'PredictedTotalPerturbativeInputWeigthFloatsArray',
'PredictedExactLateralWeigthFloatsArray',
'PredictedLeakExactLateralWeigthFloatsArray',
'PredictedLateralRandomFloatsArray',
'PredictedPerturbativeLateralWeigthFloatsArray',
'PredictedTotalPerturbativeLateralWeigthFloatsArray',
]
)
#<DefinePrint> | mit |
luxinator/Pore-Network-Generator | tools/pb_size_gen.py | 1 | 1053 | #!/usr/bin/python
'''
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
'''
__author__ = 'Lucas van Oosterhout'
import matplotlib.pyplot as plt
import numpy as np
mu, sigma = np.log(0.75e-4), 0.6
maxV, minV = 1.5e-4, 0.03e-4
s = np.random.lognormal(mu, sigma, 100000)
trunc = []
for v in s:
if minV < v < maxV:
trunc.append(v)
s = np.array(trunc)
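# Editor's note (not in the original script): the truncation loop above is
# equivalent to a single boolean mask, e.g. s = s[(s > minV) & (s < maxV)]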
#Plot
count, bins, ignored = plt.hist(s, 100, normed=True, align='mid')
plt.xlabel('Pore body Size')
plt.ylabel('Frequency')
plt.title(r'Pb Size Distribution: $\mu={}$, $\sigma={}$'.format(np.e**mu, sigma))
plt.subplots_adjust(left=0.15)
plt.savefig('hist.pdf')
f = open('pb_size.txt', 'w')
for size in s:
f.write(str(size))
f.write('\n')
f.close()
f = open('pb_dist.txt', 'w')
f.write('mu: {}, sigma: {}\n'.format(np.exp(mu), sigma))
f.write('max: {}, min: {}\n'.format(maxV, minV))
f.write('nr of Pb_sizes: {}\n'.format(len(s)))
f.close()
| gpl-2.0 |
LiaoPan/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
wrobstory/seaborn | seaborn/categorical.py | 19 | 102299 | from __future__ import division
from textwrap import dedent
import colorsys
import numpy as np
from scipy import stats
import pandas as pd
from pandas.core.series import remove_na
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
from .external.six import string_types
from .external.six.moves import range
from . import utils
from .utils import desaturate, iqr, categorical_order
from .algorithms import bootstrap
from .palettes import color_palette, husl_palette, light_palette
from .axisgrid import FacetGrid, _facet_docs
class _CategoricalPlotter(object):
width = .8
def establish_variables(self, x=None, y=None, hue=None, data=None,
orient=None, order=None, hue_order=None,
units=None):
"""Convert input specification into a common representation."""
# Option 1:
# We are plotting a wide-form dataset
# -----------------------------------
if x is None and y is None:
# Do a sanity check on the inputs
if hue is not None:
error = "Cannot use `hue` without `x` or `y`"
raise ValueError(error)
# No hue grouping with wide inputs
plot_hues = None
hue_title = None
hue_names = None
# No statistical units with wide inputs
plot_units = None
# We also won't get a axes labels here
value_label = None
group_label = None
# Option 1a:
# The input data is a Pandas DataFrame
# ------------------------------------
if isinstance(data, pd.DataFrame):
# Order the data correctly
if order is None:
order = []
# Reduce to just numeric columns
for col in data:
try:
data[col].astype(np.float)
order.append(col)
except ValueError:
pass
plot_data = data[order]
group_names = order
group_label = data.columns.name
# Convert to a list of arrays, the common representation
iter_data = plot_data.iteritems()
plot_data = [np.asarray(s, np.float) for k, s in iter_data]
# Option 1b:
# The input data is an array or list
# ----------------------------------
else:
# We can't reorder the data
if order is not None:
error = "Input data must be a pandas object to reorder"
raise ValueError(error)
# The input data is an array
if hasattr(data, "shape"):
if len(data.shape) == 1:
if np.isscalar(data[0]):
plot_data = [data]
else:
plot_data = list(data)
elif len(data.shape) == 2:
nr, nc = data.shape
if nr == 1 or nc == 1:
plot_data = [data.ravel()]
else:
plot_data = [data[:, i] for i in range(nc)]
else:
error = ("Input `data` can have no "
"more than 2 dimensions")
raise ValueError(error)
# Check if `data` is None to let us bail out here (for testing)
elif data is None:
plot_data = [[]]
# The input data is a flat list
elif np.isscalar(data[0]):
plot_data = [data]
# The input data is a nested list
# This will catch some things that might fail later
# but exhaustive checks are hard
else:
plot_data = data
# Convert to a list of arrays, the common representation
plot_data = [np.asarray(d, np.float) for d in plot_data]
# The group names will just be numeric indices
group_names = list(range((len(plot_data))))
# Figure out the plotting orientation
orient = "h" if str(orient).startswith("h") else "v"
# Option 2:
# We are plotting a long-form dataset
# -----------------------------------
else:
# See if we need to get variables from `data`
if data is not None:
x = data.get(x, x)
y = data.get(y, y)
hue = data.get(hue, hue)
units = data.get(units, units)
# Validate the inputs
for input in [x, y, hue, units]:
if isinstance(input, string_types):
err = "Could not interpret input '{}'".format(input)
raise ValueError(err)
# Figure out the plotting orientation
orient = self.infer_orient(x, y, orient)
# Option 2a:
# We are plotting a single set of data
# ------------------------------------
if x is None or y is None:
# Determine where the data are
vals = y if x is None else x
# Put them into the common representation
plot_data = [np.asarray(vals)]
# Get a label for the value axis
if hasattr(vals, "name"):
value_label = vals.name
else:
value_label = None
# This plot will not have group labels or hue nesting
groups = None
group_label = None
group_names = []
plot_hues = None
hue_names = None
hue_title = None
plot_units = None
# Option 2b:
# We are grouping the data values by another variable
# ---------------------------------------------------
else:
# Determine which role each variable will play
if orient == "v":
vals, groups = y, x
else:
vals, groups = x, y
# Get the categorical axis label
group_label = None
if hasattr(groups, "name"):
group_label = groups.name
# Get the order on the categorical axis
group_names = categorical_order(groups, order)
# Group the numeric data
plot_data, value_label = self._group_longform(vals, groups,
group_names)
# Now handle the hue levels for nested ordering
if hue is None:
plot_hues = None
hue_title = None
hue_names = None
else:
# Get the order of the hue levels
hue_names = categorical_order(hue, hue_order)
# Group the hue data
plot_hues, hue_title = self._group_longform(hue, groups,
group_names)
# Now handle the units for nested observations
if units is None:
plot_units = None
else:
plot_units, _ = self._group_longform(units, groups,
group_names)
# Assign object attributes
# ------------------------
self.orient = orient
self.plot_data = plot_data
self.group_label = group_label
self.value_label = value_label
self.group_names = group_names
self.plot_hues = plot_hues
self.hue_title = hue_title
self.hue_names = hue_names
self.plot_units = plot_units
def _group_longform(self, vals, grouper, order):
"""Group a long-form variable by another with correct order."""
# Ensure that the groupby will work
if not isinstance(vals, pd.Series):
vals = pd.Series(vals)
# Group the val data
grouped_vals = vals.groupby(grouper)
out_data = []
for g in order:
try:
g_vals = np.asarray(grouped_vals.get_group(g))
except KeyError:
g_vals = np.array([])
out_data.append(g_vals)
# Get the vals axis label
label = vals.name
return out_data, label
def establish_colors(self, color, palette, saturation):
"""Get a list of colors for the main component of the plots."""
if self.hue_names is None:
n_colors = len(self.plot_data)
else:
n_colors = len(self.hue_names)
# Determine the main colors
if color is None and palette is None:
# Determine whether the current palette will have enough values
# If not, we'll default to the husl palette so each is distinct
current_palette = mpl.rcParams["axes.color_cycle"]
if n_colors <= len(current_palette):
colors = color_palette(n_colors=n_colors)
else:
colors = husl_palette(n_colors, l=.7)
elif palette is None:
# When passing a specific color, the interpretation depends
# on whether there is a hue variable or not.
# If so, we will make a blend palette so that the different
# levels have some amount of variation.
if self.hue_names is None:
colors = [color] * n_colors
else:
colors = light_palette(color, n_colors)
else:
# Let `palette` be a dict mapping level to color
if isinstance(palette, dict):
if self.hue_names is None:
levels = self.group_names
else:
levels = self.hue_names
palette = [palette[l] for l in levels]
colors = color_palette(palette, n_colors)
# Convert the colors to a common rgb representation
colors = [mpl.colors.colorConverter.to_rgb(c) for c in colors]
# Desaturate a bit because these are patches
if saturation < 1:
colors = [desaturate(c, saturation) for c in colors]
# Determine the gray color to use for the lines framing the plot
light_vals = [colorsys.rgb_to_hls(*c)[1] for c in colors]
l = min(light_vals) * .6
gray = (l, l, l)
# Assign object attributes
self.colors = colors
self.gray = gray
def infer_orient(self, x, y, orient=None):
"""Determine how the plot should be oriented based on the data."""
orient = str(orient)
def is_categorical(s):
try:
# Correct way, but doesn't exist in older Pandas
return pd.core.common.is_categorical_dtype(s)
except AttributeError:
# Also works, but feels hackier
return str(s.dtype) == "categorical"
def is_not_numeric(s):
try:
np.asarray(s, dtype=np.float)
except ValueError:
return True
return False
no_numeric = "Neither the `x` nor `y` variable appears to be numeric."
if orient.startswith("v"):
return "v"
elif orient.startswith("h"):
return "h"
elif x is None:
return "v"
elif y is None:
return "h"
elif is_categorical(y):
if is_categorical(x):
raise ValueError(no_numeric)
else:
return "h"
elif is_not_numeric(y):
if is_not_numeric(x):
raise ValueError(no_numeric)
else:
return "h"
else:
return "v"
@property
def hue_offsets(self):
"""A list of center positions for plots when hue nesting is used."""
n_levels = len(self.hue_names)
each_width = self.width / n_levels
offsets = np.linspace(0, self.width - each_width, n_levels)
offsets -= offsets.mean()
return offsets
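    # Editor's note (not in the original source): a worked example -- with the
    # default width of .8 and two hue levels, each_width is .4 and the offsets
    # become [-.2, +.2], i.e. the nested elements sit symmetrically around the
    # group's tick position.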
@property
def nested_width(self):
"""A float with the width of plot elements when hue nesting is used."""
return self.width / len(self.hue_names) * .98
def annotate_axes(self, ax):
"""Add descriptive labels to an Axes object."""
if self.orient == "v":
xlabel, ylabel = self.group_label, self.value_label
else:
xlabel, ylabel = self.value_label, self.group_label
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if self.orient == "v":
ax.set_xticks(np.arange(len(self.plot_data)))
ax.set_xticklabels(self.group_names)
else:
ax.set_yticks(np.arange(len(self.plot_data)))
ax.set_yticklabels(self.group_names)
if self.orient == "v":
ax.xaxis.grid(False)
ax.set_xlim(-.5, len(self.plot_data) - .5)
else:
ax.yaxis.grid(False)
ax.set_ylim(-.5, len(self.plot_data) - .5)
if self.hue_names is not None:
leg = ax.legend(loc="best")
if self.hue_title is not None:
leg.set_title(self.hue_title)
# Set the title size a roundabout way to maintain
# compatibility with matplotlib 1.1
try:
title_size = mpl.rcParams["axes.labelsize"] * .85
except TypeError: # labelsize is something like "large"
title_size = mpl.rcParams["axes.labelsize"]
prop = mpl.font_manager.FontProperties(size=title_size)
leg._legend_title_box._text.set_font_properties(prop)
def add_legend_data(self, ax, color, label):
"""Add a dummy patch object so we can get legend data."""
rect = plt.Rectangle([0, 0], 0, 0,
linewidth=self.linewidth / 2,
edgecolor=self.gray,
facecolor=color,
label=label)
ax.add_patch(rect)
class _BoxPlotter(_CategoricalPlotter):
def __init__(self, x, y, hue, data, order, hue_order,
orient, color, palette, saturation,
width, fliersize, linewidth):
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, saturation)
self.width = width
self.fliersize = fliersize
if linewidth is None:
linewidth = mpl.rcParams["lines.linewidth"]
self.linewidth = linewidth
def draw_boxplot(self, ax, kws):
"""Use matplotlib to draw a boxplot on an Axes."""
vert = self.orient == "v"
for i, group_data in enumerate(self.plot_data):
if self.plot_hues is None:
# Handle case where there is no data at this level
if group_data.size == 0:
continue
# Draw a single box or a set of boxes
# with a single level of grouping
box_data = remove_na(group_data)
# Handle case where there is no non-null data
if box_data.size == 0:
continue
artist_dict = ax.boxplot(box_data,
vert=vert,
patch_artist=True,
positions=[i],
widths=self.width,
**kws)
color = self.colors[i]
self.restyle_boxplot(artist_dict, color, kws)
else:
# Draw nested groups of boxes
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
hue_mask = self.plot_hues[i] == hue_level
# Add a legend for this hue level
if not i:
self.add_legend_data(ax, self.colors[j], hue_level)
# Handle case where there is no data at this level
if group_data.size == 0:
continue
box_data = remove_na(group_data[hue_mask])
# Handle case where there is no non-null data
if box_data.size == 0:
continue
center = i + offsets[j]
artist_dict = ax.boxplot(box_data,
vert=vert,
patch_artist=True,
positions=[center],
widths=self.nested_width,
**kws)
self.restyle_boxplot(artist_dict, self.colors[j], kws)
# Add legend data, but just for one set of boxes
def restyle_boxplot(self, artist_dict, color, kws):
"""Take a drawn matplotlib boxplot and make it look nice."""
for box in artist_dict["boxes"]:
box.update(dict(color=color,
zorder=.9,
edgecolor=self.gray,
linewidth=self.linewidth))
box.update(kws.get("boxprops", {}))
for whisk in artist_dict["whiskers"]:
whisk.update(dict(color=self.gray,
linewidth=self.linewidth,
linestyle="-"))
whisk.update(kws.get("whiskerprops", {}))
for cap in artist_dict["caps"]:
cap.update(dict(color=self.gray,
linewidth=self.linewidth))
cap.update(kws.get("capprops", {}))
for med in artist_dict["medians"]:
med.update(dict(color=self.gray,
linewidth=self.linewidth))
med.update(kws.get("medianprops", {}))
for fly in artist_dict["fliers"]:
fly.update(dict(color=self.gray,
marker="d",
markeredgecolor=self.gray,
markersize=self.fliersize))
fly.update(kws.get("flierprops", {}))
def plot(self, ax, boxplot_kws):
"""Make the plot."""
self.draw_boxplot(ax, boxplot_kws)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _ViolinPlotter(_CategoricalPlotter):
def __init__(self, x, y, hue, data, order, hue_order,
bw, cut, scale, scale_hue, gridsize,
width, inner, split, orient, linewidth,
color, palette, saturation):
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, saturation)
self.estimate_densities(bw, cut, scale, scale_hue, gridsize)
self.gridsize = gridsize
self.width = width
if inner is not None:
if not any([inner.startswith("quart"),
inner.startswith("box"),
inner.startswith("stick"),
inner.startswith("point")]):
err = "Inner style '{}' not recognized".format(inner)
raise ValueError(err)
self.inner = inner
if split and self.hue_names is not None and len(self.hue_names) != 2:
raise ValueError("Cannot use `split` with more than 2 hue levels.")
self.split = split
if linewidth is None:
linewidth = mpl.rcParams["lines.linewidth"]
self.linewidth = linewidth
def estimate_densities(self, bw, cut, scale, scale_hue, gridsize):
"""Find the support and density for all of the data."""
# Initialize data structures to keep track of plotting data
if self.hue_names is None:
support = []
density = []
counts = np.zeros(len(self.plot_data))
max_density = np.zeros(len(self.plot_data))
else:
support = [[] for _ in self.plot_data]
density = [[] for _ in self.plot_data]
size = len(self.group_names), len(self.hue_names)
counts = np.zeros(size)
max_density = np.zeros(size)
for i, group_data in enumerate(self.plot_data):
# Option 1: we have a single level of grouping
# --------------------------------------------
if self.plot_hues is None:
# Strip missing datapoints
kde_data = remove_na(group_data)
# Handle special case of no data at this level
if kde_data.size == 0:
support.append(np.array([]))
density.append(np.array([1.]))
counts[i] = 0
max_density[i] = 0
continue
# Handle special case of a single unique datapoint
elif np.unique(kde_data).size == 1:
support.append(np.unique(kde_data))
density.append(np.array([1.]))
counts[i] = 1
max_density[i] = 0
continue
# Fit the KDE and get the used bandwidth size
kde, bw_used = self.fit_kde(kde_data, bw)
# Determine the support grid and get the density over it
support_i = self.kde_support(kde_data, bw_used, cut, gridsize)
density_i = kde.evaluate(support_i)
# Update the data structures with these results
support.append(support_i)
density.append(density_i)
counts[i] = kde_data.size
max_density[i] = density_i.max()
# Option 2: we have nested grouping by a hue variable
# ---------------------------------------------------
else:
for j, hue_level in enumerate(self.hue_names):
# Handle special case of no data at this category level
if not group_data.size:
support[i].append(np.array([]))
density[i].append(np.array([1.]))
counts[i, j] = 0
max_density[i, j] = 0
continue
# Select out the observations for this hue level
hue_mask = self.plot_hues[i] == hue_level
# Strip missing datapoints
kde_data = remove_na(group_data[hue_mask])
# Handle special case of no data at this level
if kde_data.size == 0:
support[i].append(np.array([]))
density[i].append(np.array([1.]))
counts[i, j] = 0
max_density[i, j] = 0
continue
# Handle special case of a single unique datapoint
elif np.unique(kde_data).size == 1:
support[i].append(np.unique(kde_data))
density[i].append(np.array([1.]))
counts[i, j] = 1
max_density[i, j] = 0
continue
# Fit the KDE and get the used bandwidth size
kde, bw_used = self.fit_kde(kde_data, bw)
# Determine the support grid and get the density over it
support_ij = self.kde_support(kde_data, bw_used,
cut, gridsize)
density_ij = kde.evaluate(support_ij)
# Update the data structures with these results
support[i].append(support_ij)
density[i].append(density_ij)
counts[i, j] = kde_data.size
max_density[i, j] = density_ij.max()
# Scale the height of the density curve.
# For a violinplot the density is non-quantitative.
# The objective here is to scale the curves relative to 1 so that
# they can be multiplied by the width parameter during plotting.
if scale == "area":
self.scale_area(density, max_density, scale_hue)
elif scale == "width":
self.scale_width(density)
elif scale == "count":
self.scale_count(density, counts, scale_hue)
else:
raise ValueError("scale method '{}' not recognized".format(scale))
# Set object attributes that will be used while plotting
self.support = support
self.density = density
def fit_kde(self, x, bw):
"""Estimate a KDE for a vector of data with flexible bandwidth."""
# Allow for the use of old scipy where `bw` is fixed
try:
kde = stats.gaussian_kde(x, bw)
except TypeError:
kde = stats.gaussian_kde(x)
if bw != "scott": # scipy default
msg = ("Ignoring bandwidth choice, "
"please upgrade scipy to use a different bandwidth.")
warnings.warn(msg, UserWarning)
# Extract the numeric bandwidth from the KDE object
bw_used = kde.factor
# At this point, bw will be a numeric scale factor.
# To get the actual bandwidth of the kernel, we multiply by the
# unbiased standard deviation of the data, which we will use
# elsewhere to compute the range of the support.
bw_used = bw_used * x.std(ddof=1)
return kde, bw_used
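    # Editor's note (not in the original source): scipy's `kde.factor` is only
    # a scale factor relative to the data's spread; multiplying by std(ddof=1)
    # above converts it to a bandwidth in data units, which kde_support() then
    # uses to pad the support grid by `cut` bandwidths on each side.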
def kde_support(self, x, bw, cut, gridsize):
"""Define a grid of support for the violin."""
support_min = x.min() - bw * cut
support_max = x.max() + bw * cut
return np.linspace(support_min, support_max, gridsize)
def scale_area(self, density, max_density, scale_hue):
"""Scale the relative area under the KDE curve.
This essentially preserves the "standard" KDE scaling, but the
resulting maximum density will be 1 so that the curve can be
properly multiplied by the violin width.
"""
if self.hue_names is None:
for d in density:
if d.size > 1:
d /= max_density.max()
else:
for i, group in enumerate(density):
for d in group:
if scale_hue:
max = max_density[i].max()
else:
max = max_density.max()
if d.size > 1:
d /= max
def scale_width(self, density):
"""Scale each density curve to the same height."""
if self.hue_names is None:
for d in density:
d /= d.max()
else:
for group in density:
for d in group:
d /= d.max()
def scale_count(self, density, counts, scale_hue):
"""Scale each density curve by the number of observations."""
if self.hue_names is None:
for count, d in zip(counts, density):
d /= d.max()
d *= count / counts.max()
else:
for i, group in enumerate(density):
for j, d in enumerate(group):
count = counts[i, j]
if scale_hue:
scaler = count / counts[i].max()
else:
scaler = count / counts.max()
d /= d.max()
d *= scaler
@property
def dwidth(self):
if self.hue_names is None:
return self.width / 2
elif self.split:
return self.width / 2
else:
return self.width / (2 * len(self.hue_names))
def draw_violins(self, ax):
"""Draw the violins onto `ax`."""
fill_func = ax.fill_betweenx if self.orient == "v" else ax.fill_between
for i, group_data in enumerate(self.plot_data):
kws = dict(edgecolor=self.gray, linewidth=self.linewidth)
# Option 1: we have a single level of grouping
# --------------------------------------------
if self.plot_hues is None:
support, density = self.support[i], self.density[i]
# Handle special case of no observations in this bin
if support.size == 0:
continue
# Handle special case of a single observation
elif support.size == 1:
val = np.asscalar(support)
d = np.asscalar(density)
self.draw_single_observation(ax, i, val, d)
continue
# Draw the violin for this group
grid = np.ones(self.gridsize) * i
fill_func(support,
grid - density * self.dwidth,
grid + density * self.dwidth,
color=self.colors[i],
**kws)
# Draw the interior representation of the data
if self.inner is None:
continue
# Get a nan-free vector of datapoints
violin_data = remove_na(group_data)
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data, support, density, i)
# Draw quartile lines
elif self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data, support, density, i)
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data, support, density, i)
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i)
# Option 2: we have nested grouping by a hue variable
# ---------------------------------------------------
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
support, density = self.support[i][j], self.density[i][j]
kws["color"] = self.colors[j]
# Add legend data, but just for one set of violins
if not i:
self.add_legend_data(ax, self.colors[j], hue_level)
# Handle the special case where we have no observations
if support.size == 0:
continue
# Handle the special case where we have one observation
elif support.size == 1:
val = np.asscalar(support)
d = np.asscalar(density)
if self.split:
d = d / 2
at_group = i + offsets[j]
self.draw_single_observation(ax, at_group, val, d)
continue
# Option 2a: we are drawing a single split violin
# -----------------------------------------------
if self.split:
grid = np.ones(self.gridsize) * i
if j:
fill_func(support,
grid,
grid + density * self.dwidth,
**kws)
else:
fill_func(support,
grid - density * self.dwidth,
grid,
**kws)
# Draw the interior representation of the data
if self.inner is None:
continue
# Get a nan-free vector of datapoints
hue_mask = self.plot_hues[i] == hue_level
violin_data = remove_na(group_data[hue_mask])
# Draw quartile lines
if self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data,
support, density, i,
["left", "right"][j])
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data,
support, density, i,
["left", "right"][j])
# The box and point interior plots are drawn for
# all data at the group level, so we just do that once
if not j:
continue
# Get the whole vector for this group level
violin_data = remove_na(group_data)
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data,
support, density, i)
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i)
# Option 2b: we are drawing full nested violins
# -----------------------------------------------
else:
grid = np.ones(self.gridsize) * (i + offsets[j])
fill_func(support,
grid - density * self.dwidth,
grid + density * self.dwidth,
**kws)
# Draw the interior representation
if self.inner is None:
continue
# Get a nan-free vector of datapoints
hue_mask = self.plot_hues[i] == hue_level
violin_data = remove_na(group_data[hue_mask])
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data,
support, density,
i + offsets[j])
# Draw quartile lines
elif self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data,
support, density,
i + offsets[j])
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data,
support, density,
i + offsets[j])
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i + offsets[j])
def draw_single_observation(self, ax, at_group, at_quant, density):
"""Draw a line to mark a single observation."""
d_width = density * self.dwidth
if self.orient == "v":
ax.plot([at_group - d_width, at_group + d_width],
[at_quant, at_quant],
color=self.gray,
linewidth=self.linewidth)
else:
ax.plot([at_quant, at_quant],
[at_group - d_width, at_group + d_width],
color=self.gray,
linewidth=self.linewidth)
def draw_box_lines(self, ax, data, support, density, center):
"""Draw boxplot information at center of the density."""
# Compute the boxplot statistics
q25, q50, q75 = np.percentile(data, [25, 50, 75])
whisker_lim = 1.5 * iqr(data)
h1 = np.min(data[data >= (q25 - whisker_lim)])
h2 = np.max(data[data <= (q75 + whisker_lim)])
# Draw a boxplot using lines and a point
if self.orient == "v":
ax.plot([center, center], [h1, h2],
linewidth=self.linewidth,
color=self.gray)
ax.plot([center, center], [q25, q75],
linewidth=self.linewidth * 3,
color=self.gray)
ax.scatter(center, q50,
zorder=3,
color="white",
edgecolor=self.gray,
s=np.square(self.linewidth * 2))
else:
ax.plot([h1, h2], [center, center],
linewidth=self.linewidth,
color=self.gray)
ax.plot([q25, q75], [center, center],
linewidth=self.linewidth * 3,
color=self.gray)
ax.scatter(q50, center,
zorder=3,
color="white",
edgecolor=self.gray,
s=np.square(self.linewidth * 2))
def draw_quartiles(self, ax, data, support, density, center, split=False):
"""Draw the quartiles as lines at width of density."""
q25, q50, q75 = np.percentile(data, [25, 50, 75])
self.draw_to_density(ax, center, q25, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 1.5] * 2)
self.draw_to_density(ax, center, q50, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 3] * 2)
self.draw_to_density(ax, center, q75, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 1.5] * 2)
def draw_points(self, ax, data, center):
"""Draw individual observations as points at middle of the violin."""
kws = dict(s=np.square(self.linewidth * 2),
c=self.gray,
edgecolor=self.gray)
grid = np.ones(len(data)) * center
if self.orient == "v":
ax.scatter(grid, data, **kws)
else:
ax.scatter(data, grid, **kws)
def draw_stick_lines(self, ax, data, support, density,
center, split=False):
"""Draw individual observations as sticks at width of density."""
for val in data:
self.draw_to_density(ax, center, val, support, density, split,
linewidth=self.linewidth * .5)
def draw_to_density(self, ax, center, val, support, density, split, **kws):
"""Draw a line orthogonal to the value axis at width of density."""
idx = np.argmin(np.abs(support - val))
width = self.dwidth * density[idx] * .99
kws["color"] = self.gray
if self.orient == "v":
if split == "left":
ax.plot([center - width, center], [val, val], **kws)
elif split == "right":
ax.plot([center, center + width], [val, val], **kws)
else:
ax.plot([center - width, center + width], [val, val], **kws)
else:
if split == "left":
ax.plot([val, val], [center - width, center], **kws)
elif split == "right":
ax.plot([val, val], [center, center + width], **kws)
else:
ax.plot([val, val], [center - width, center + width], **kws)
def plot(self, ax):
"""Make the violin plot."""
self.draw_violins(ax)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _StripPlotter(_CategoricalPlotter):
"""1-d scatterplot with categorical organization."""
def __init__(self, x, y, hue, data, order, hue_order,
jitter, split, orient, color, palette):
"""Initialize the plotter."""
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, 1)
# Set object attributes
self.split = split
self.width = .8
if jitter == 1: # Use a good default for `jitter = True`
jlim = 0.1
else:
jlim = float(jitter)
if self.hue_names is not None and split:
jlim /= len(self.hue_names)
self.jitterer = stats.uniform(-jlim, jlim * 2).rvs
def draw_stripplot(self, ax, kws):
"""Draw the points onto `ax`."""
# Set the default zorder to 2.1, so that the points
# will be drawn on top of line elements (like in a boxplot)
kws.setdefault("zorder", 2.1)
for i, group_data in enumerate(self.plot_data):
if self.plot_hues is None:
# Determine the positions of the points
strip_data = remove_na(group_data)
jitter = self.jitterer(len(strip_data))
kws["color"] = self.colors[i]
# Draw the plot
if self.orient == "v":
ax.scatter(i + jitter, strip_data, **kws)
else:
ax.scatter(strip_data, i + jitter, **kws)
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
hue_mask = self.plot_hues[i] == hue_level
if not hue_mask.any():
continue
# Determine the positions of the points
strip_data = remove_na(group_data[hue_mask])
pos = i + offsets[j] if self.split else i
jitter = self.jitterer(len(strip_data))
kws["color"] = self.colors[j]
# Only label one set of plots
if i:
kws.pop("label", None)
else:
kws["label"] = hue_level
# Draw the plot
if self.orient == "v":
ax.scatter(pos + jitter, strip_data, **kws)
else:
ax.scatter(strip_data, pos + jitter, **kws)
def plot(self, ax, kws):
"""Make the plot."""
self.draw_stripplot(ax, kws)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _SwarmPlotter(_BoxPlotter):
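    # Placeholder class: the methods below are stubs and draw nothing.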
def __init__(self):
pass
def plot(self, ax):
pass
class _CategoricalStatPlotter(_CategoricalPlotter):
@property
def nested_width(self):
"""A float with the width of plot elements when hue nesting is used."""
return self.width / len(self.hue_names)
def estimate_statistic(self, estimator, ci, n_boot):
if self.hue_names is None:
statistic = []
confint = []
else:
statistic = [[] for _ in self.plot_data]
confint = [[] for _ in self.plot_data]
for i, group_data in enumerate(self.plot_data):
# Option 1: we have a single layer of grouping
# --------------------------------------------
if self.plot_hues is None:
if self.plot_units is None:
stat_data = remove_na(group_data)
unit_data = None
else:
unit_data = self.plot_units[i]
have = pd.notnull(np.c_[group_data, unit_data]).all(axis=1)
stat_data = group_data[have]
unit_data = unit_data[have]
# Estimate a statistic from the vector of data
if not stat_data.size:
statistic.append(np.nan)
else:
statistic.append(estimator(stat_data))
# Get a confidence interval for this estimate
if ci is not None:
if stat_data.size < 2:
confint.append([np.nan, np.nan])
continue
boots = bootstrap(stat_data, func=estimator,
n_boot=n_boot,
units=unit_data)
confint.append(utils.ci(boots, ci))
# Option 2: we are grouping by a hue layer
# ----------------------------------------
else:
for j, hue_level in enumerate(self.hue_names):
if not self.plot_hues[i].size:
statistic[i].append(np.nan)
if ci is not None:
confint[i].append((np.nan, np.nan))
continue
hue_mask = self.plot_hues[i] == hue_level
if self.plot_units is None:
stat_data = remove_na(group_data[hue_mask])
unit_data = None
else:
group_units = self.plot_units[i]
have = pd.notnull(
np.c_[group_data, group_units]
).all(axis=1)
stat_data = group_data[hue_mask & have]
unit_data = group_units[hue_mask & have]
# Estimate a statistic from the vector of data
if not stat_data.size:
statistic[i].append(np.nan)
else:
statistic[i].append(estimator(stat_data))
# Get a confidence interval for this estimate
if ci is not None:
if stat_data.size < 2:
confint[i].append([np.nan, np.nan])
continue
boots = bootstrap(stat_data, func=estimator,
n_boot=n_boot,
units=unit_data)
confint[i].append(utils.ci(boots, ci))
# Save the resulting values for plotting
self.statistic = np.array(statistic)
self.confint = np.array(confint)
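        # Without hue nesting these have shapes (n_groups,) and (n_groups, 2);
        # with hue nesting an extra hue dimension is added.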
# Rename the value label to reflect the estimation
if self.value_label is not None:
self.value_label = "{}({})".format(estimator.__name__,
self.value_label)
def draw_confints(self, ax, at_group, confint, colors, **kws):
kws.setdefault("lw", mpl.rcParams["lines.linewidth"] * 1.8)
for at, (ci_low, ci_high), color in zip(at_group,
confint,
colors):
if self.orient == "v":
ax.plot([at, at], [ci_low, ci_high], color=color, **kws)
else:
ax.plot([ci_low, ci_high], [at, at], color=color, **kws)
class _BarPlotter(_CategoricalStatPlotter):
"""Show point estimates and confidence intervals with bars."""
def __init__(self, x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
orient, color, palette, saturation, errcolor):
"""Initialize the plotter."""
self.establish_variables(x, y, hue, data, orient,
order, hue_order, units)
self.establish_colors(color, palette, saturation)
self.estimate_statistic(estimator, ci, n_boot)
self.errcolor = errcolor
def draw_bars(self, ax, kws):
"""Draw the bars onto `ax`."""
# Get the right matplotlib function depending on the orientation
barfunc = ax.bar if self.orient == "v" else ax.barh
barpos = np.arange(len(self.statistic))
if self.plot_hues is None:
# Draw the bars
barfunc(barpos, self.statistic, self.width,
color=self.colors, align="center", **kws)
# Draw the confidence intervals
errcolors = [self.errcolor] * len(barpos)
self.draw_confints(ax, barpos, self.confint, errcolors)
else:
for j, hue_level in enumerate(self.hue_names):
# Draw the bars
offpos = barpos + self.hue_offsets[j]
barfunc(offpos, self.statistic[:, j], self.nested_width,
color=self.colors[j], align="center",
label=hue_level, **kws)
# Draw the confidence intervals
if self.confint.size:
confint = self.confint[:, j]
errcolors = [self.errcolor] * len(offpos)
self.draw_confints(ax, offpos, confint, errcolors)
def plot(self, ax, bar_kws):
"""Make the plot."""
self.draw_bars(ax, bar_kws)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _PointPlotter(_CategoricalStatPlotter):
"""Show point estimates and confidence intervals with (joined) points."""
def __init__(self, x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
markers, linestyles, dodge, join, scale,
orient, color, palette):
"""Initialize the plotter."""
self.establish_variables(x, y, hue, data, orient,
order, hue_order, units)
self.establish_colors(color, palette, 1)
self.estimate_statistic(estimator, ci, n_boot)
# Override the default palette for single-color plots
if hue is None and color is None and palette is None:
self.colors = [color_palette()[0]] * len(self.colors)
# Don't join single-layer plots with different colors
if hue is None and palette is not None:
join = False
# Use a good default for `dodge=True`
if dodge is True and self.hue_names is not None:
dodge = .025 * len(self.hue_names)
# Make sure we have a marker for each hue level
if isinstance(markers, string_types):
markers = [markers] * len(self.colors)
self.markers = markers
# Make sure we have a line style for each hue level
if isinstance(linestyles, string_types):
linestyles = [linestyles] * len(self.colors)
self.linestyles = linestyles
# Set the other plot components
self.dodge = dodge
self.join = join
self.scale = scale
@property
def hue_offsets(self):
"""Offsets relative to the center position for each hue level."""
offset = np.linspace(0, self.dodge, len(self.hue_names))
offset -= offset.mean()
return offset
def draw_points(self, ax):
"""Draw the main data components of the plot."""
# Get the center positions on the categorical axis
pointpos = np.arange(len(self.statistic))
# Get the size of the plot elements
lw = mpl.rcParams["lines.linewidth"] * 1.8 * self.scale
mew = lw * .75
markersize = np.pi * np.square(lw) * 2
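        # ``s`` is a marker area in points^2, so the marker size grows
        # quadratically with the line width (and hence with ``scale``).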
if self.plot_hues is None:
# Draw lines joining each estimate point
if self.join:
color = self.colors[0]
ls = self.linestyles[0]
if self.orient == "h":
ax.plot(self.statistic, pointpos,
color=color, ls=ls, lw=lw)
else:
ax.plot(pointpos, self.statistic,
color=color, ls=ls, lw=lw)
# Draw the confidence intervals
self.draw_confints(ax, pointpos, self.confint, self.colors, lw=lw)
# Draw the estimate points
marker = self.markers[0]
if self.orient == "h":
ax.scatter(self.statistic, pointpos,
linewidth=mew, marker=marker, s=markersize,
c=self.colors, edgecolor=self.colors)
else:
ax.scatter(pointpos, self.statistic,
linewidth=mew, marker=marker, s=markersize,
c=self.colors, edgecolor=self.colors)
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
# Determine the values to plot for this level
statistic = self.statistic[:, j]
# Determine the position on the categorical and z axes
offpos = pointpos + offsets[j]
z = j + 1
# Draw lines joining each estimate point
if self.join:
color = self.colors[j]
ls = self.linestyles[j]
if self.orient == "h":
ax.plot(statistic, offpos, color=color,
zorder=z, ls=ls, lw=lw)
else:
ax.plot(offpos, statistic, color=color,
zorder=z, ls=ls, lw=lw)
# Draw the confidence intervals
if self.confint.size:
confint = self.confint[:, j]
errcolors = [self.colors[j]] * len(offpos)
self.draw_confints(ax, offpos, confint, errcolors,
zorder=z, lw=lw)
# Draw the estimate points
marker = self.markers[j]
if self.orient == "h":
ax.scatter(statistic, offpos, label=hue_level,
c=[self.colors[j]] * len(offpos),
linewidth=mew, marker=marker, s=markersize,
edgecolor=self.colors[j], zorder=z)
else:
ax.scatter(offpos, statistic, label=hue_level,
c=[self.colors[j]] * len(offpos),
linewidth=mew, marker=marker, s=markersize,
edgecolor=self.colors[j], zorder=z)
def plot(self, ax):
"""Make the plot."""
self.draw_points(ax)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
_categorical_docs = dict(
# Shared narrative docs
main_api_narrative=dedent("""\
Input data can be passed in a variety of formats, including:
- Vectors of data represented as lists, numpy arrays, or pandas Series
objects passed directly to the ``x``, ``y``, and/or ``hue`` parameters.
- A "long-form" DataFrame, in which case the ``x``, ``y``, and ``hue``
variables will determine how the data are plotted.
- A "wide-form" DataFrame, such that each numeric column will be plotted.
    - Anything accepted by ``plt.boxplot`` (e.g. a 2d array or list of vectors).
In most cases, it is possible to use numpy or Python objects, but pandas
objects are preferable because the associated names will be used to
annotate the axes. Additionally, you can use Categorical types for the
grouping variables to control the order of plot elements.\
"""),
# Shared function parameters
input_params=dedent("""\
x, y, hue : names of variables in ``data`` or vector data, optional
Inputs for plotting long-form data. See examples for interpretation.\
"""),
string_input_params=dedent("""\
x, y, hue : names of variables in ``data``
Inputs for plotting long-form data. See examples for interpretation.\
"""),
categorical_data=dedent("""\
data : DataFrame, array, or list of arrays, optional
Dataset for plotting. If ``x`` and ``y`` are absent, this is
interpreted as wide-form. Otherwise it is expected to be long-form.\
"""),
long_form_data=dedent("""\
data : DataFrame
Long-form (tidy) dataset for plotting. Each column should correspond
to a variable, and each row should correspond to an observation.\
"""),
order_vars=dedent("""\
order, hue_order : lists of strings, optional
Order to plot the categorical levels in, otherwise the levels are
inferred from the data objects.\
"""),
stat_api_params=dedent("""\
estimator : callable that maps vector -> scalar, optional
Statistical function to estimate within each categorical bin.
ci : float or None, optional
Size of confidence intervals to draw around estimated values. If
``None``, no bootstrapping will be performed, and error bars will
not be drawn.
n_boot : int, optional
Number of bootstrap iterations to use when computing confidence
intervals.
units : name of variable in ``data`` or vector data, optional
Identifier of sampling units, which will be used to perform a
multilevel bootstrap and account for repeated measures design.\
"""),
orient=dedent("""\
orient : "v" | "h", optional
Orientation of the plot (vertical or horizontal). This is usually
inferred from the dtype of the input variables, but can be used to
        specify when the "categorical" variable is numeric or when plotting
wide-form data.\
"""),
color=dedent("""\
color : matplotlib color, optional
Color for all of the elements, or seed for :func:`light_palette` when
using hue nesting.\
"""),
palette=dedent("""\
palette : palette name, list, or dict, optional
Color palette that maps either the grouping variable or the hue
variable. If the palette is a dictionary, keys should be names of
levels and values should be matplotlib colors.\
"""),
saturation=dedent("""\
saturation : float, optional
Proportion of the original saturation to draw colors at. Large patches
often look better with slightly desaturated colors, but set this to
``1`` if you want the plot colors to perfectly match the input color
spec.\
"""),
width=dedent("""\
width : float, optional
Width of a full element when not using hue nesting, or width of all the
elements for one level of the major grouping variable.\
"""),
linewidth=dedent("""\
linewidth : float, optional
Width of the gray lines that frame the plot elements.\
"""),
ax_in=dedent("""\
ax : matplotlib Axes, optional
Axes object to draw the plot onto, otherwise uses the current Axes.\
"""),
ax_out=dedent("""\
ax : matplotlib Axes
Returns the Axes object with the boxplot drawn onto it.\
"""),
# Shared see also
boxplot=dedent("""\
boxplot : A traditional box-and-whisker plot with a similar API.\
"""),
violinplot=dedent("""\
violinplot : A combination of boxplot and kernel density estimation.\
"""),
stripplot=dedent("""\
stripplot : A scatterplot where one variable is categorical. Can be used
        in conjunction with other plots to show each observation.\
"""),
barplot=dedent("""\
barplot : Show point estimates and confidence intervals using bars.\
"""),
countplot=dedent("""\
countplot : Show the counts of observations in each categorical bin.\
"""),
pointplot=dedent("""\
pointplot : Show point estimates and confidence intervals using scatterplot
glyphs.\
"""),
factorplot=dedent("""\
    factorplot : Combine categorical plots and a :class:`FacetGrid`.\
"""),
)
_categorical_docs.update(_facet_docs)
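# A minimal sketch of the equivalent input formats described in
# ``main_api_narrative`` above (assumes the bundled "tips" example dataset
# is available via ``load_dataset``):
#
#     >>> tips = load_dataset("tips")
#     >>> boxplot(x=tips["day"], y=tips["total_bill"])  # vectors
#     >>> boxplot(x="day", y="total_bill", data=tips)   # long-form DataFrame
#     >>> boxplot(data=tips)                            # wide-form DataFrame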
def boxplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
orient=None, color=None, palette=None, saturation=.75,
width=.8, fliersize=5, linewidth=None, whis=1.5, notch=False,
ax=None, **kwargs):
    # Try to handle broken backwards-compatibility
# This should help with the lack of a smooth deprecation,
# but won't catch everything
warn = False
if isinstance(x, pd.DataFrame):
data = x
x = None
warn = True
if "vals" in kwargs:
x = kwargs.pop("vals")
warn = True
if "groupby" in kwargs:
y = x
x = kwargs.pop("groupby")
warn = True
if "vert" in kwargs:
vert = kwargs.pop("vert", True)
if not vert:
x, y = y, x
orient = "v" if vert else "h"
warn = True
if "names" in kwargs:
kwargs.pop("names")
warn = True
if "join_rm" in kwargs:
kwargs.pop("join_rm")
warn = True
msg = ("The boxplot API has been changed. Attempting to adjust your "
"arguments for the new API (which might not work). Please update "
"your code. See the version 0.6 release notes for more info.")
if warn:
warnings.warn(msg, UserWarning)
plotter = _BoxPlotter(x, y, hue, data, order, hue_order,
orient, color, palette, saturation,
width, fliersize, linewidth)
if ax is None:
ax = plt.gca()
kwargs.update(dict(whis=whis, notch=notch))
plotter.plot(ax, kwargs)
return ax
boxplot.__doc__ = dedent("""\
Draw a box plot to show distributions with respect to categories.
A box plot (or box-and-whisker plot) shows the distribution of quantitative
data in a way that facilitates comparisons between variables or across
levels of a categorical variable. The box shows the quartiles of the
dataset while the whiskers extend to show the rest of the distribution,
except for points that are determined to be "outliers" using a method
that is a function of the inter-quartile range.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
{orient}
{color}
{palette}
{saturation}
{width}
fliersize : float, optional
Size of the markers used to indicate outlier observations.
{linewidth}
whis : float, optional
Proportion of the IQR past the low and high quartiles to extend the
plot whiskers. Points outside this range will be identified as
outliers.
notch : boolean, optional
Whether to "notch" the box to indicate a confidence interval for the
median. There are several other parameters that can control how the
notches are drawn; see the ``plt.boxplot`` help for more information
on them.
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed through to ``plt.boxplot`` at draw
time.
Returns
-------
{ax_out}
See Also
--------
{violinplot}
{stripplot}
Examples
--------
Draw a single horizontal boxplot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.boxplot(x=tips["total_bill"])
Draw a vertical boxplot grouped by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", data=tips)
Draw a boxplot with nested grouping by two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="Set3")
Draw a boxplot with nested grouping when some bins are empty:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", hue="time",
... data=tips, linewidth=2.5)
Control box order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="size", y="tip", data=tips.sort("size"))
Control box order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Draw a boxplot for each numeric variable in a DataFrame:
.. plot::
:context: close-figs
>>> iris = sns.load_dataset("iris")
>>> ax = sns.boxplot(data=iris, orient="h", palette="Set2")
Use :func:`stripplot` to show the datapoints on top of the boxes:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", data=tips)
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips,
... size=4, jitter=True, edgecolor="gray")
Draw a box plot on to a :class:`FacetGrid` to group within an additional
categorical variable:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="time", size=4, aspect=.7)
>>> (g.map(sns.boxplot, "sex", "total_bill", "smoker")
... .despine(left=True)
... .add_legend(title="smoker")) #doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_categorical_docs)
def violinplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
bw="scott", cut=2, scale="area", scale_hue=True, gridsize=100,
width=.8, inner="box", split=False, orient=None, linewidth=None,
color=None, palette=None, saturation=.75, ax=None, **kwargs):
    # Try to handle broken backwards-compatibility
# This should help with the lack of a smooth deprecation,
# but won't catch everything
warn = False
if isinstance(x, pd.DataFrame):
data = x
x = None
warn = True
if "vals" in kwargs:
x = kwargs.pop("vals")
warn = True
if "groupby" in kwargs:
y = x
x = kwargs.pop("groupby")
warn = True
if "vert" in kwargs:
vert = kwargs.pop("vert", True)
if not vert:
x, y = y, x
orient = "v" if vert else "h"
warn = True
msg = ("The violinplot API has been changed. Attempting to adjust your "
"arguments for the new API (which might not work). Please update "
"your code. See the version 0.6 release notes for more info.")
if warn:
warnings.warn(msg, UserWarning)
plotter = _ViolinPlotter(x, y, hue, data, order, hue_order,
bw, cut, scale, scale_hue, gridsize,
width, inner, split, orient, linewidth,
color, palette, saturation)
if ax is None:
ax = plt.gca()
plotter.plot(ax)
return ax
violinplot.__doc__ = dedent("""\
Draw a combination of boxplot and kernel density estimate.
A violin plot plays a similar role as a box and whisker plot. It shows the
distribution of quantitative data across several levels of one (or more)
categorical variables such that those distributions can be compared. Unlike
a box plot, in which all of the plot components correspond to actual
datapoints, the violin plot features a kernel density estimation of the
underlying distribution.
This can be an effective and attractive way to show multiple distributions
of data at once, but keep in mind that the estimation procedure is
influenced by the sample size, and violins for relatively small samples
might look misleadingly smooth.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
bw : {{'scott', 'silverman', float}}, optional
Either the name of a reference rule or the scale factor to use when
computing the kernel bandwidth. The actual kernel size will be
determined by multiplying the scale factor by the standard deviation of
the data within each bin.
cut : float, optional
Distance, in units of bandwidth size, to extend the density past the
extreme datapoints. Set to 0 to limit the violin range within the range
of the observed data (i.e., to have the same effect as ``trim=True`` in
        ``ggplot``).
scale : {{"area", "count", "width"}}, optional
The method used to scale the width of each violin. If ``area``, each
violin will have the same area. If ``count``, the width of the violins
will be scaled by the number of observations in that bin. If ``width``,
each violin will have the same width.
scale_hue : bool, optional
When nesting violins using a ``hue`` variable, this parameter
determines whether the scaling is computed within each level of the
major grouping variable (``scale_hue=True``) or across all the violins
on the plot (``scale_hue=False``).
gridsize : int, optional
Number of points in the discrete grid used to compute the kernel
density estimate.
{width}
inner : {{"box", "quartile", "point", "stick", None}}, optional
Representation of the datapoints in the violin interior. If ``box``,
        draw a miniature boxplot. If ``quartile``, draw the quartiles of the
distribution. If ``point`` or ``stick``, show each underlying
datapoint. Using ``None`` will draw unadorned violins.
split : bool, optional
When using hue nesting with a variable that takes two levels, setting
``split`` to True will draw half of a violin for each level. This can
make it easier to directly compare the distributions.
{orient}
{linewidth}
{color}
{palette}
{saturation}
{ax_in}
Returns
-------
{ax_out}
See Also
--------
{boxplot}
{stripplot}
Examples
--------
Draw a single horizontal violinplot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.violinplot(x=tips["total_bill"])
Draw a vertical violinplot grouped by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", data=tips)
Draw a violinplot with nested grouping by two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="muted")
    Draw split violins to compare across the hue variable:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="muted", split=True)
Control violin order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="size", y="tip", data=tips.sort("size"))
Control violin order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Scale the violin width by the number of observations in each bin:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count")
Draw the quartiles as horizontal lines instead of a mini-box:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="quartile")
Show each observation with a stick inside the violin:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="stick")
Scale the density relative to the counts across all bins:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="stick", scale_hue=False)
Use a narrow bandwidth to reduce the amount of smoothing:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="stick",
... scale_hue=False, bw=.2)
Draw horizontal violins:
.. plot::
:context: close-figs
>>> planets = sns.load_dataset("planets")
>>> ax = sns.violinplot(x="orbital_period", y="method",
... data=planets[planets.orbital_period < 1000],
... scale="width", palette="Set3")
Draw a violin plot on to a :class:`FacetGrid` to group within an additional
categorical variable:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="time", size=4, aspect=.7)
>>> (g.map(sns.violinplot, "sex", "total_bill", "smoker", split=True)
... .despine(left=True)
... .add_legend(title="smoker")) # doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_categorical_docs)
def stripplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
jitter=False, split=True, orient=None, color=None, palette=None,
size=7, edgecolor="w", linewidth=1, ax=None, **kwargs):
plotter = _StripPlotter(x, y, hue, data, order, hue_order,
jitter, split, orient, color, palette)
if ax is None:
ax = plt.gca()
kwargs.update(dict(s=size ** 2, edgecolor=edgecolor, linewidth=linewidth))
if edgecolor == "gray":
kwargs["edgecolor"] = plotter.gray
plotter.plot(ax, kwargs)
return ax
stripplot.__doc__ = dedent("""\
Draw a scatterplot where one variable is categorical.
A strip plot can be drawn on its own, but it is also a good complement
to a box or violin plot in cases where you want to show all observations
along with some representation of the underlying distribution.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
jitter : float, ``True``/``1`` is special-cased, optional
Amount of jitter (only along the categorical axis) to apply. This
can be useful when you have many points and they overlap, so that
it is easier to see the distribution. You can specify the amount
of jitter (half the width of the uniform random variable support),
or just use ``True`` for a good default.
split : bool, optional
When using ``hue`` nesting, setting this to ``True`` will separate
the strips for different hue levels along the categorical axis.
Otherwise, the points for each level will be plotted on top of
each other.
{orient}
{color}
{palette}
size : float, optional
Diameter of the markers, in points. (Although ``plt.scatter`` is used
to draw the points, the ``size`` argument here takes a "normal"
        markersize and not size^2 like ``plt.scatter``).
edgecolor : matplotlib color, "gray" is special-cased, optional
Color of the lines around each point. If you pass ``"gray"``, the
brightness is determined by the color palette used for the body
of the points.
{linewidth}
{ax_in}
Returns
-------
{ax_out}
See Also
--------
{boxplot}
{violinplot}
Examples
--------
Draw a single horizontal strip plot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.stripplot(x=tips["total_bill"])
Group the strips by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips)
Add jitter to bring out the distribution of values:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips, jitter=True)
Use a smaller amount of jitter:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips, jitter=0.05)
Draw horizontal strips:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="total_bill", y="day", data=tips,
... jitter=True)
Nest the strips within a second categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="sex", y="total_bill", hue="day",
... data=tips, jitter=True)
Draw each level of the ``hue`` variable at the same location on the
major categorical axis:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", hue="smoker",
... data=tips, jitter=True,
... palette="Set2", split=False)
Control strip order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="size", y="tip", data=tips.sort("size"))
Control strip order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Draw strips with large points and different aesthetics:
.. plot::
:context: close-figs
>>> ax = sns.stripplot("day", "total_bill", "smoker", data=tips,
... palette="Set2", size=20, marker="D",
... edgecolor="gray", alpha=.25)
Draw strips of observations on top of a box plot:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="tip", y="day", data=tips, whis=np.inf)
>>> ax = sns.stripplot(x="tip", y="day", data=tips, jitter=True)
Draw strips of observations on top of a violin plot:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", data=tips, inner=None)
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips,
... jitter=True, color="white", edgecolor="gray")
""").format(**_categorical_docs)
def barplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
estimator=np.mean, ci=95, n_boot=1000, units=None,
orient=None, color=None, palette=None, saturation=.75,
errcolor=".26", ax=None, **kwargs):
# Handle some deprecated arguments
if "hline" in kwargs:
kwargs.pop("hline")
warnings.warn("The `hline` parameter has been removed", UserWarning)
if "dropna" in kwargs:
kwargs.pop("dropna")
warnings.warn("The `dropna` parameter has been removed", UserWarning)
if "x_order" in kwargs:
order = kwargs.pop("x_order")
warnings.warn("The `x_order` parameter has been renamed `order`",
UserWarning)
plotter = _BarPlotter(x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
orient, color, palette, saturation,
errcolor)
if ax is None:
ax = plt.gca()
plotter.plot(ax, kwargs)
return ax
barplot.__doc__ = dedent("""\
Show point estimates and confidence intervals as rectangular bars.
A bar plot represents an estimate of central tendency for a numeric
variable with the height of each rectangle and provides some indication of
the uncertainty around that estimate using error bars. Bar plots include 0
in the quantitative axis range, and they are a good choice when 0 is a
meaningful value for the quantitative variable, and you want to make
comparisons against it.
For datasets where 0 is not a meaningful value, a point plot will allow you
to focus on differences between levels of one or more categorical
variables.
It is also important to keep in mind that a bar plot shows only the mean
(or other estimator) value, but in many cases it may be more informative to
show the distribution of values at each level of the categorical variables.
In that case, other approaches such as a box or violin plot may be more
appropriate.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
{stat_api_params}
{orient}
{color}
{palette}
{saturation}
errcolor : matplotlib color
Color for the lines that represent the confidence interval.
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed through to ``plt.bar`` at draw
time.
Returns
-------
{ax_out}
See Also
--------
{countplot}
{pointplot}
{factorplot}
Examples
--------
Draw a set of vertical bar plots grouped by a categorical variable:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.barplot(x="day", y="total_bill", data=tips)
    Draw a set of vertical bars with nested grouping by two variables:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="day", y="total_bill", hue="sex", data=tips)
Draw a set of horizontal bars:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="tip", y="day", data=tips)
Control bar order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="size", y="tip", data=tips.sort("size"))
Control bar order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Use median as the estimate of central tendency:
.. plot::
:context: close-figs
>>> from numpy import median
>>> ax = sns.barplot(x="day", y="tip", data=tips, estimator=median)
Show the standard error of the mean with the error bars:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="day", y="tip", data=tips, ci=68)
Use a different color palette for the bars:
.. plot::
:context: close-figs
>>> ax = sns.barplot("size", y="total_bill", data=tips.sort("size"),
... palette="Blues_d")
Plot all bars in a single color:
.. plot::
:context: close-figs
>>> ax = sns.barplot("size", y="total_bill", data=tips.sort("size"),
... color="salmon", saturation=.5)
Use ``plt.bar`` keyword arguments to further change the aesthetic:
.. plot::
:context: close-figs
>>> ax = sns.barplot("day", "total_bill", data=tips,
... linewidth=2.5, facecolor=(1, 1, 1, 0),
... errcolor=".2", edgecolor=".2")
""").format(**_categorical_docs)
def pointplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
estimator=np.mean, ci=95, n_boot=1000, units=None,
markers="o", linestyles="-", dodge=False, join=True, scale=1,
orient=None, color=None, palette=None, ax=None, **kwargs):
# Handle some deprecated arguments
if "hline" in kwargs:
kwargs.pop("hline")
warnings.warn("The `hline` parameter has been removed", UserWarning)
if "dropna" in kwargs:
kwargs.pop("dropna")
warnings.warn("The `dropna` parameter has been removed", UserWarning)
if "x_order" in kwargs:
order = kwargs.pop("x_order")
warnings.warn("The `x_order` parameter has been renamed `order`",
UserWarning)
plotter = _PointPlotter(x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
markers, linestyles, dodge, join, scale,
orient, color, palette)
if ax is None:
ax = plt.gca()
plotter.plot(ax)
return ax
pointplot.__doc__ = dedent("""\
Show point estimates and confidence intervals using scatter plot glyphs.
A point plot represents an estimate of central tendency for a numeric
variable by the position of scatter plot points and provides some
indication of the uncertainty around that estimate using error bars.
Point plots can be more useful than bar plots for focusing comparisons
between different levels of one or more categorical variables. They are
particularly adept at showing interactions: how the relationship between
levels of one categorical variable changes across levels of a second
categorical variable. The lines that join each point from the same ``hue``
level allow interactions to be judged by differences in slope, which is
easier for the eyes than comparing the heights of several groups of points
or bars.
It is important to keep in mind that a point plot shows only the mean (or
other estimator) value, but in many cases it may be more informative to
show the distribution of values at each level of the categorical variables.
In that case, other approaches such as a box or violin plot may be more
appropriate.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
{stat_api_params}
markers : string or list of strings, optional
Markers to use for each of the ``hue`` levels.
linestyles : string or list of strings, optional
Line styles to use for each of the ``hue`` levels.
dodge : bool or float, optional
Amount to separate the points for each level of the ``hue`` variable
along the categorical axis.
join : bool, optional
If ``True``, lines will be drawn between point estimates at the same
``hue`` level.
scale : float, optional
Scale factor for the plot elements.
{orient}
{color}
{palette}
{ax_in}
Returns
-------
{ax_out}
See Also
--------
{barplot}
{factorplot}
Examples
--------
Draw a set of vertical point plots grouped by a categorical variable:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("darkgrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.pointplot(x="time", y="total_bill", data=tips)
    Draw a set of vertical points with nested grouping by two variables:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips)
Separate the points for different hue levels along the categorical axis:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips, dodge=True)
Use a different marker and line style for the hue levels:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips,
... markers=["o", "x"],
... linestyles=["-", "--"])
Draw a set of horizontal points:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="tip", y="day", data=tips)
Don't draw a line connecting each point:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="tip", y="day", data=tips, join=False)
Use a different color for a single-layer plot:
.. plot::
:context: close-figs
>>> ax = sns.pointplot("time", y="total_bill", data=tips,
... color="#bb3f3f")
Use a different color palette for the points:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips, palette="Set2")
Control point order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="size", y="tip", data=tips.sort("size"))
Control point order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Use median as the estimate of central tendency:
.. plot::
:context: close-figs
>>> from numpy import median
>>> ax = sns.pointplot(x="day", y="tip", data=tips, estimator=median)
Show the standard error of the mean with the error bars:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="day", y="tip", data=tips, ci=68)
""").format(**_categorical_docs)
def countplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
orient=None, color=None, palette=None, saturation=.75,
ax=None, **kwargs):
estimator = len
ci = None
n_boot = 0
units = None
errcolor = None
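    # Alias the single input variable to both axes so the bar plotter can
    # count it; ``orient`` controls which axis shows the counts.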
if x is None and y is not None:
orient = "h"
x = y
elif y is None and x is not None:
orient = "v"
y = x
elif x is not None and y is not None:
raise TypeError("Cannot pass values for both `x` and `y`")
else:
raise TypeError("Must pass valus for either `x` or `y`")
plotter = _BarPlotter(x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
orient, color, palette, saturation,
errcolor)
plotter.value_label = "count"
if ax is None:
ax = plt.gca()
plotter.plot(ax, kwargs)
return ax
countplot.__doc__ = dedent("""\
Show the counts of observations in each categorical bin using bars.
A count plot can be thought of as a histogram across a categorical, instead
of quantitative, variable. The basic API and options are identical to those
for :func:`barplot`, so you can compare counts across nested variables.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
{orient}
{color}
{palette}
{saturation}
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed to ``plt.bar``.
Returns
-------
{ax_out}
See Also
--------
{barplot}
{factorplot}
Examples
--------
Show value counts for a single categorical variable:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set(style="darkgrid")
>>> titanic = sns.load_dataset("titanic")
>>> ax = sns.countplot(x="class", data=titanic)
Show value counts for two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.countplot(x="class", hue="who", data=titanic)
Plot the bars horizontally:
.. plot::
:context: close-figs
>>> ax = sns.countplot(y="class", hue="who", data=titanic)
Use a different color palette:
.. plot::
:context: close-figs
>>> ax = sns.countplot(x="who", data=titanic, palette="Set3")
Use ``plt.bar`` keyword arguments for a different look:
.. plot::
:context: close-figs
>>> ax = sns.countplot(x="who", data=titanic,
... facecolor=(0, 0, 0, 0),
... linewidth=5,
... edgecolor=sns.color_palette("dark", 3))
""").format(**_categorical_docs)
def factorplot(x=None, y=None, hue=None, data=None, row=None, col=None,
col_wrap=None, estimator=np.mean, ci=95, n_boot=1000,
units=None, order=None, hue_order=None, row_order=None,
col_order=None, kind="point", size=4, aspect=1,
orient=None, color=None, palette=None,
legend=True, legend_out=True, sharex=True, sharey=True,
margin_titles=False, facet_kws=None, **kwargs):
# Handle some deprecated arguments
if "hline" in kwargs:
kwargs.pop("hline")
warnings.warn("The `hline` parameter has been removed", UserWarning)
if "dropna" in kwargs:
kwargs.pop("dropna")
warnings.warn("The `dropna` parameter has been removed", UserWarning)
if "x_order" in kwargs:
order = kwargs.pop("x_order")
warnings.warn("The `x_order` parameter has been renamed `order`",
UserWarning)
# Determine the plotting function
try:
plot_func = globals()[kind + "plot"]
except KeyError:
err = "Plot kind '{}' is not recognized".format(kind)
raise ValueError(err)
# Alias the input variables to determine categorical order and palette
# correctly in the case of a count plot
if kind == "count":
if x is None and y is not None:
x_, y_, orient = y, y, "h"
elif y is None and x is not None:
x_, y_, orient = x, x, "v"
else:
raise ValueError("Either `x` or `y` must be None for count plots")
else:
x_, y_ = x, y
# Determine the order for the whole dataset, which will be used in all
# facets to ensure representation of all data in the final plot
p = _CategoricalPlotter()
p.establish_variables(x_, y_, hue, data, orient, order, hue_order)
order = p.group_names
hue_order = p.hue_names
# Determine the palette to use
# (FacetGrid will pass a value for ``color`` to the plotting function
# so we need to define ``palette`` to get default behavior for the
    # categorical functions)
p.establish_colors(color, palette, 1)
if kind != "point" or hue is not None:
palette = p.colors
# Determine keyword arguments for the facets
facet_kws = {} if facet_kws is None else facet_kws
facet_kws.update(
data=data, row=row, col=col,
row_order=row_order, col_order=col_order,
col_wrap=col_wrap, size=size, aspect=aspect,
sharex=sharex, sharey=sharey,
legend_out=legend_out, margin_titles=margin_titles,
dropna=False,
)
# Determine keyword arguments for the plotting function
plot_kws = dict(
order=order, hue_order=hue_order,
orient=orient, color=color, palette=palette,
)
plot_kws.update(kwargs)
if kind in ["bar", "point"]:
plot_kws.update(
estimator=estimator, ci=ci, n_boot=n_boot, units=units,
)
# Initialize the facets
g = FacetGrid(**facet_kws)
# Draw the plot onto the facets
g.map_dataframe(plot_func, x, y, hue, **plot_kws)
# Special case axis labels for a count type plot
if kind == "count":
if x is None:
g.set_axis_labels(x_var="count")
if y is None:
g.set_axis_labels(y_var="count")
if legend and (hue is not None) and (hue not in [x, row, col]):
hue_order = list(map(str, hue_order))
g.add_legend(title=hue, label_order=hue_order)
return g
factorplot.__doc__ = dedent("""\
Draw a categorical plot onto a FacetGrid.
The default plot that is shown is a point plot, but other seaborn
categorical plots can be chosen with the ``kind`` parameter, including
box plots, violin plots, bar plots, or strip plots.
It is important to choose how variables get mapped to the plot structure
such that the most important comparisons are easiest to make. As a general
rule, it is easier to compare positions that are closer together, so the
``hue`` variable should be used for the most important comparisons. For
secondary comparisons, try to share the quantitative axis (so, use ``col``
for vertical plots and ``row`` for horizontal plots). Note that, although
it is possible to make rather complex plots using this function, in many
    cases you may be better served by creating several smaller and more focused
plots than by trying to stuff many comparisons into one figure.
After plotting, the :class:`FacetGrid` with the plot is returned and can
be used directly to tweak supporting plot details or add other layers.
Note that, unlike when using the underlying plotting functions directly,
data must be passed in a long-form DataFrame with variables specified by
passing strings to ``x``, ``y``, ``hue``, and other parameters.
As in the case with the underlying plot functions, if variables have a
``categorical`` data type, the correct orientation of the plot elements,
the levels of the categorical variables, and their order will be inferred
from the objects. Otherwise you may have to use the function parameters
(``orient``, ``order``, ``hue_order``, etc.) to set up the plot correctly.
Parameters
----------
{string_input_params}
{long_form_data}
row, col : names of variables in ``data``, optional
Categorical variables that will determine the faceting of the grid.
{col_wrap}
{stat_api_params}
{order_vars}
row_order, col_order : lists of strings, optional
Order to organize the rows and/or columns of the grid in, otherwise the
orders are inferred from the data objects.
kind : {{``point``, ``bar``, ``count``, ``box``, ``violin``, ``strip``}}
The kind of plot to draw.
{size}
{aspect}
{orient}
{color}
{palette}
legend : bool, optional
If ``True`` and there is a ``hue`` variable, draw a legend on the plot.
{legend_out}
{share_xy}
{margin_titles}
facet_kws : dict, optional
Dictionary of other keyword arguments to pass to :class:`FacetGrid`.
kwargs : key, value pairings
Other keyword arguments are passed through to the underlying plotting
function.
Returns
-------
g : :class:`FacetGrid`
Returns the :class:`FacetGrid` object with the plot on it for further
tweaking.
Examples
--------
Draw a single facet to use the :class:`FacetGrid` legend placement:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set(style="ticks")
>>> exercise = sns.load_dataset("exercise")
>>> g = sns.factorplot(x="time", y="pulse", hue="kind", data=exercise)
Use a different plot kind to visualize the same data:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="time", y="pulse", hue="kind",
... data=exercise, kind="violin")
Facet along the columns to show a third categorical variable:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="time", y="pulse", hue="kind",
... col="diet", data=exercise)
Use a different size and aspect ratio for the facets:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="time", y="pulse", hue="kind",
... col="diet", data=exercise,
... size=5, aspect=.8)
Make many column facets and wrap them into the rows of the grid:
.. plot::
:context: close-figs
>>> titanic = sns.load_dataset("titanic")
>>> g = sns.factorplot("alive", col="deck", col_wrap=4,
... data=titanic[titanic.deck.notnull()],
... kind="count", size=2.5, aspect=.8)
Plot horizontally and pass other keyword arguments to the plot function:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="age", y="embark_town",
... hue="sex", row="class",
... data=titanic[titanic.embark_town.notnull()],
... orient="h", size=2, aspect=3.5, palette="Set3",
... kind="violin", split=True, cut=0, bw=.2)
Use methods on the returned :class:`FacetGrid` to tweak the presentation:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="who", y="survived", col="class",
... data=titanic, saturation=.5,
... kind="bar", ci=None, aspect=.6)
>>> (g.set_axis_labels("", "Survival Rate")
... .set_xticklabels(["Men", "Women", "Children"])
... .set_titles("{{col_name}} {{col_var}}")
... .set(ylim=(0, 1))
... .despine(left=True)) #doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_categorical_docs)
| bsd-3-clause |
jmetzen/scikit-learn | examples/preprocessing/plot_robust_scaling.py | 85 | 2698 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
themrmax/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 27 | 25397 | import numpy as np
from scipy import linalg
from sklearn.model_selection import train_test_split
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.exceptions import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
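            # ``ocur`` counts the covariances (numerically) tied at the
            # maximum; LARS adds one such predictor per step.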
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
rng = np.random.RandomState(0)
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = rng.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
    # deficient input data (with rank < n_features) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alpha_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
# same test, with normalization
for alpha_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
    # Note: it used to be the case that Lars had to use the 'drop for good'
    # strategy for this, but this is no longer the case thanks to the
    # equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
    # ensure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
@ignore_warnings
def test_multitarget():
    # Ensure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
estimators = [
linear_model.LassoLars(),
linear_model.Lars(),
# regression test for gh-1615
linear_model.LassoLars(fit_intercept=False),
linear_model.Lars(fit_intercept=False),
]
for estimator in estimators:
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
    # This is the main test for the positive parameter on the lars_path
    # method; the estimator classes just make use of this function.
    # We run the test on the diabetes dataset and check that we get some
    # negative coefficients when positive=False and only non-negative
    # coefficients when positive=True, for both method='lar' (the default)
    # and method='lasso'.
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
    # now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
    # check that the positive option is correctly passed through to all
    # estimator classes
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
    # This test is basically a copy of the above with the additional positive
    # option. However, for the middle part (the comparison of coefficient
    # values over a range of alphas) an adaptation was needed. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
    # The range of alphas chosen for the coefficient comparison here is
    # restricted compared with the above test without the positive option,
    # because the Lars-Lasso algorithm does not converge to the
    # least-squares solution for small alphas, see 'Least Angle Regression'
    # by Efron et al. (2004). The coefficients typically agree up to the
    # smallest alpha reached by the Lars-Lasso algorithm and start to
    # diverge thereafter. See
    # https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_R_implementation():
# Test that sklearn LassoLars implementation agrees with the LassoLars
# implementation available in R (lars library) under the following
# scenarios:
# 1) fit_intercept=False and normalize=False
# 2) fit_intercept=True and normalize=True
# Let's generate the data used in the bug report 7778
y = np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822,
-19.42109366])
x = np.array([[0.47299829, 0, 0, 0, 0],
[0.08239882, 0.85784863, 0, 0, 0],
[0.30114139, -0.07501577, 0.80895216, 0, 0],
[-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0],
[-0.69363927, 0.06754067, 0.18064514, -0.0803561,
0.40427291]])
X = x.T
###########################################################################
# Scenario 1: Let's compare R vs sklearn when fit_intercept=False and
# normalize=False
###########################################################################
#
# The R result was obtained using the following code:
#
# library(lars)
# model_lasso_lars = lars(X, t(y), type="lasso", intercept=FALSE,
# trace=TRUE, normalize=FALSE)
# r = t(model_lasso_lars$beta)
#
r = np.array([[0, 0, 0, 0, 0, -79.810362809499026, -83.528788732782829,
-83.777653739190711, -83.784156932888934,
-84.033390591756657],
[0, 0, 0, 0, -0.476624256777266, 0, 0, 0, 0,
0.025219751009936],
[0, -3.577397088285891, -4.702795355871871,
-7.016748621359461, -7.614898471899412, -0.336938391359179,
0, 0, 0.001213370600853, 0.048162321585148],
[0, 0, 0, 2.231558436628169, 2.723267514525966,
2.811549786389614, 2.813766976061531, 2.817462468949557,
2.817368178703816, 2.816221090636795],
[0, 0, -1.218422599914637, -3.457726183014808,
-4.021304522060710, -45.827461592423745,
-47.776608869312305,
-47.911561610746404, -47.914845922736234,
-48.039562334265717]])
model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False,
normalize=False)
model_lasso_lars.fit(X, y)
skl_betas = model_lasso_lars.coef_path_
assert_array_almost_equal(r, skl_betas, decimal=12)
###########################################################################
###########################################################################
# Scenario 2: Let's compare R vs sklearn when fit_intercept=True and
# normalize=True
#
    # Note: when normalize=True, R returns the coefficients in their
    # original units, i.e. it rescales them back, whereas sklearn does not.
    # Therefore we need to rescale the sklearn coefficients before comparing
    # the results.
###########################################################################
#
# The R result was obtained using the following code:
#
# library(lars)
# model_lasso_lars2 = lars(X, t(y), type="lasso", intercept=TRUE,
# trace=TRUE, normalize=TRUE)
# r2 = t(model_lasso_lars2$beta)
r2 = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 8.371887668009453, 19.463768371044026],
[0, 0, 0, 0, 9.901611055290553],
[0, 7.495923132833733, 9.245133544334507,
17.389369207545062, 26.971656815643499],
[0, 0, -1.569380717440311, -5.924804108067312,
-7.996385265061972]])
model_lasso_lars2 = linear_model.LassoLars(alpha=0, fit_intercept=True,
normalize=True)
model_lasso_lars2.fit(X, y)
skl_betas2 = model_lasso_lars2.coef_path_
# Let's rescale back the coefficients returned by sklearn before comparing
# against the R result (read the note above)
temp = X - np.mean(X, axis=0)
normx = np.sqrt(np.sum(temp ** 2, axis=0))
skl_betas2 /= normx[:, np.newaxis]
assert_array_almost_equal(r2, skl_betas2, decimal=12)
###########################################################################
| bsd-3-clause |
CVML/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/dtypes/test_common.py | 3 | 19448 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas.core.dtypes.dtypes import (DatetimeTZDtype, PeriodDtype,
CategoricalDtype, IntervalDtype)
import pandas.core.dtypes.common as com
import pandas.util.testing as tm
class TestPandasDtype(object):
    # Passing an invalid dtype, either as a string or as an object, must
    # raise TypeError
# Per issue GH15520
def test_invalid_dtype_error(self):
msg = 'not understood'
invalid_list = [pd.Timestamp, 'pd.Timestamp', list]
for dtype in invalid_list:
with tm.assert_raises_regex(TypeError, msg):
com.pandas_dtype(dtype)
valid_list = [object, 'float64', np.object_, np.dtype('object'), 'O',
np.float64, float, np.dtype('float64')]
for dtype in valid_list:
com.pandas_dtype(dtype)
def test_numpy_dtype(self):
for dtype in ['M8[ns]', 'm8[ns]', 'object', 'float64', 'int64']:
assert com.pandas_dtype(dtype) == np.dtype(dtype)
def test_numpy_string_dtype(self):
# do not parse freq-like string as period dtype
assert com.pandas_dtype('U') == np.dtype('U')
assert com.pandas_dtype('S') == np.dtype('S')
def test_datetimetz_dtype(self):
for dtype in ['datetime64[ns, US/Eastern]',
'datetime64[ns, Asia/Tokyo]',
'datetime64[ns, UTC]']:
assert com.pandas_dtype(dtype) is DatetimeTZDtype(dtype)
assert com.pandas_dtype(dtype) == DatetimeTZDtype(dtype)
assert com.pandas_dtype(dtype) == dtype
def test_categorical_dtype(self):
assert com.pandas_dtype('category') == CategoricalDtype()
def test_period_dtype(self):
for dtype in ['period[D]', 'period[3M]', 'period[U]',
'Period[D]', 'Period[3M]', 'Period[U]']:
assert com.pandas_dtype(dtype) is PeriodDtype(dtype)
assert com.pandas_dtype(dtype) == PeriodDtype(dtype)
assert com.pandas_dtype(dtype) == dtype
dtypes = dict(datetime_tz=com.pandas_dtype('datetime64[ns, US/Eastern]'),
datetime=com.pandas_dtype('datetime64[ns]'),
timedelta=com.pandas_dtype('timedelta64[ns]'),
period=PeriodDtype('D'),
integer=np.dtype(np.int64),
float=np.dtype(np.float64),
object=np.dtype(np.object),
category=com.pandas_dtype('category'))
@pytest.mark.parametrize('name1,dtype1',
list(dtypes.items()),
ids=lambda x: str(x))
@pytest.mark.parametrize('name2,dtype2',
list(dtypes.items()),
ids=lambda x: str(x))
def test_dtype_equal(name1, dtype1, name2, dtype2):
# match equal to self, but not equal to other
assert com.is_dtype_equal(dtype1, dtype1)
if name1 != name2:
assert not com.is_dtype_equal(dtype1, dtype2)
def test_dtype_equal_strict():
# we are strict on kind equality
for dtype in [np.int8, np.int16, np.int32]:
assert not com.is_dtype_equal(np.int64, dtype)
for dtype in [np.float32]:
assert not com.is_dtype_equal(np.float64, dtype)
# strict w.r.t. PeriodDtype
assert not com.is_dtype_equal(PeriodDtype('D'), PeriodDtype('2D'))
# strict w.r.t. datetime64
assert not com.is_dtype_equal(
com.pandas_dtype('datetime64[ns, US/Eastern]'),
com.pandas_dtype('datetime64[ns, CET]'))
# see gh-15941: no exception should be raised
assert not com.is_dtype_equal(None, None)
def get_is_dtype_funcs():
"""
Get all functions in pandas.core.dtypes.common that
begin with 'is_' and end with 'dtype'
"""
fnames = [f for f in dir(com) if (f.startswith('is_') and
f.endswith('dtype'))]
return [getattr(com, fname) for fname in fnames]
@pytest.mark.parametrize('func',
get_is_dtype_funcs(),
ids=lambda x: x.__name__)
def test_get_dtype_error_catch(func):
# see gh-15941
#
# No exception should be raised.
assert not func(None)
def test_is_object():
assert com.is_object_dtype(object)
assert com.is_object_dtype(np.array([], dtype=object))
assert not com.is_object_dtype(int)
assert not com.is_object_dtype(np.array([], dtype=int))
assert not com.is_object_dtype([1, 2, 3])
def test_is_sparse():
assert com.is_sparse(pd.SparseArray([1, 2, 3]))
assert com.is_sparse(pd.SparseSeries([1, 2, 3]))
assert not com.is_sparse(np.array([1, 2, 3]))
# This test will only skip if the previous assertions
# pass AND scipy is not installed.
sparse = pytest.importorskip("scipy.sparse")
assert not com.is_sparse(sparse.bsr_matrix([1, 2, 3]))
def test_is_scipy_sparse():
tm._skip_if_no_scipy()
from scipy.sparse import bsr_matrix
assert com.is_scipy_sparse(bsr_matrix([1, 2, 3]))
assert not com.is_scipy_sparse(pd.SparseArray([1, 2, 3]))
assert not com.is_scipy_sparse(pd.SparseSeries([1, 2, 3]))
def test_is_categorical():
cat = pd.Categorical([1, 2, 3])
assert com.is_categorical(cat)
assert com.is_categorical(pd.Series(cat))
assert com.is_categorical(pd.CategoricalIndex([1, 2, 3]))
assert not com.is_categorical([1, 2, 3])
def test_is_datetimetz():
assert not com.is_datetimetz([1, 2, 3])
assert not com.is_datetimetz(pd.DatetimeIndex([1, 2, 3]))
assert com.is_datetimetz(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
dtype = DatetimeTZDtype("ns", tz="US/Eastern")
s = pd.Series([], dtype=dtype)
assert com.is_datetimetz(s)
def test_is_period():
assert not com.is_period([1, 2, 3])
assert not com.is_period(pd.Index([1, 2, 3]))
assert com.is_period(pd.PeriodIndex(["2017-01-01"], freq="D"))
def test_is_datetime64_dtype():
assert not com.is_datetime64_dtype(object)
assert not com.is_datetime64_dtype([1, 2, 3])
assert not com.is_datetime64_dtype(np.array([], dtype=int))
assert com.is_datetime64_dtype(np.datetime64)
assert com.is_datetime64_dtype(np.array([], dtype=np.datetime64))
def test_is_datetime64tz_dtype():
assert not com.is_datetime64tz_dtype(object)
assert not com.is_datetime64tz_dtype([1, 2, 3])
assert not com.is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3]))
assert com.is_datetime64tz_dtype(pd.DatetimeIndex(
[1, 2, 3], tz="US/Eastern"))
def test_is_timedelta64_dtype():
assert not com.is_timedelta64_dtype(object)
assert not com.is_timedelta64_dtype([1, 2, 3])
assert not com.is_timedelta64_dtype(np.array([], dtype=np.datetime64))
assert com.is_timedelta64_dtype(np.timedelta64)
assert com.is_timedelta64_dtype(pd.Series([], dtype="timedelta64[ns]"))
assert not com.is_timedelta64_dtype("0 days 00:00:00")
def test_is_period_dtype():
assert not com.is_period_dtype(object)
assert not com.is_period_dtype([1, 2, 3])
assert not com.is_period_dtype(pd.Period("2017-01-01"))
assert com.is_period_dtype(PeriodDtype(freq="D"))
assert com.is_period_dtype(pd.PeriodIndex([], freq="A"))
def test_is_interval_dtype():
assert not com.is_interval_dtype(object)
assert not com.is_interval_dtype([1, 2, 3])
assert com.is_interval_dtype(IntervalDtype())
interval = pd.Interval(1, 2, closed="right")
assert not com.is_interval_dtype(interval)
assert com.is_interval_dtype(pd.IntervalIndex([interval]))
def test_is_categorical_dtype():
assert not com.is_categorical_dtype(object)
assert not com.is_categorical_dtype([1, 2, 3])
assert com.is_categorical_dtype(CategoricalDtype())
assert com.is_categorical_dtype(pd.Categorical([1, 2, 3]))
assert com.is_categorical_dtype(pd.CategoricalIndex([1, 2, 3]))
def test_is_string_dtype():
assert not com.is_string_dtype(int)
assert not com.is_string_dtype(pd.Series([1, 2]))
assert com.is_string_dtype(str)
assert com.is_string_dtype(object)
assert com.is_string_dtype(np.array(['a', 'b']))
def test_is_period_arraylike():
assert not com.is_period_arraylike([1, 2, 3])
assert not com.is_period_arraylike(pd.Index([1, 2, 3]))
assert com.is_period_arraylike(pd.PeriodIndex(["2017-01-01"], freq="D"))
def test_is_datetime_arraylike():
assert not com.is_datetime_arraylike([1, 2, 3])
assert not com.is_datetime_arraylike(pd.Index([1, 2, 3]))
assert com.is_datetime_arraylike(pd.DatetimeIndex([1, 2, 3]))
def test_is_datetimelike():
assert not com.is_datetimelike([1, 2, 3])
assert not com.is_datetimelike(pd.Index([1, 2, 3]))
assert com.is_datetimelike(pd.DatetimeIndex([1, 2, 3]))
assert com.is_datetimelike(pd.PeriodIndex([], freq="A"))
assert com.is_datetimelike(np.array([], dtype=np.datetime64))
assert com.is_datetimelike(pd.Series([], dtype="timedelta64[ns]"))
assert com.is_datetimelike(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
dtype = DatetimeTZDtype("ns", tz="US/Eastern")
s = pd.Series([], dtype=dtype)
assert com.is_datetimelike(s)
def test_is_integer_dtype():
assert not com.is_integer_dtype(str)
assert not com.is_integer_dtype(float)
assert not com.is_integer_dtype(np.datetime64)
assert not com.is_integer_dtype(np.timedelta64)
assert not com.is_integer_dtype(pd.Index([1, 2.]))
assert not com.is_integer_dtype(np.array(['a', 'b']))
assert not com.is_integer_dtype(np.array([], dtype=np.timedelta64))
assert com.is_integer_dtype(int)
assert com.is_integer_dtype(np.uint64)
assert com.is_integer_dtype(pd.Series([1, 2]))
def test_is_signed_integer_dtype():
assert not com.is_signed_integer_dtype(str)
assert not com.is_signed_integer_dtype(float)
assert not com.is_signed_integer_dtype(np.uint64)
assert not com.is_signed_integer_dtype(np.datetime64)
assert not com.is_signed_integer_dtype(np.timedelta64)
assert not com.is_signed_integer_dtype(pd.Index([1, 2.]))
assert not com.is_signed_integer_dtype(np.array(['a', 'b']))
assert not com.is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32))
assert not com.is_signed_integer_dtype(np.array([], dtype=np.timedelta64))
assert com.is_signed_integer_dtype(int)
assert com.is_signed_integer_dtype(pd.Series([1, 2]))
def test_is_unsigned_integer_dtype():
assert not com.is_unsigned_integer_dtype(str)
assert not com.is_unsigned_integer_dtype(int)
assert not com.is_unsigned_integer_dtype(float)
assert not com.is_unsigned_integer_dtype(pd.Series([1, 2]))
assert not com.is_unsigned_integer_dtype(pd.Index([1, 2.]))
assert not com.is_unsigned_integer_dtype(np.array(['a', 'b']))
assert com.is_unsigned_integer_dtype(np.uint64)
assert com.is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32))
def test_is_int64_dtype():
assert not com.is_int64_dtype(str)
assert not com.is_int64_dtype(float)
assert not com.is_int64_dtype(np.int32)
assert not com.is_int64_dtype(np.uint64)
assert not com.is_int64_dtype(pd.Index([1, 2.]))
assert not com.is_int64_dtype(np.array(['a', 'b']))
assert not com.is_int64_dtype(np.array([1, 2], dtype=np.uint32))
assert com.is_int64_dtype(np.int64)
assert com.is_int64_dtype(np.array([1, 2], dtype=np.int64))
def test_is_int_or_datetime_dtype():
assert not com.is_int_or_datetime_dtype(str)
assert not com.is_int_or_datetime_dtype(float)
assert not com.is_int_or_datetime_dtype(pd.Index([1, 2.]))
assert not com.is_int_or_datetime_dtype(np.array(['a', 'b']))
assert com.is_int_or_datetime_dtype(int)
assert com.is_int_or_datetime_dtype(np.uint64)
assert com.is_int_or_datetime_dtype(np.datetime64)
assert com.is_int_or_datetime_dtype(np.timedelta64)
assert com.is_int_or_datetime_dtype(pd.Series([1, 2]))
assert com.is_int_or_datetime_dtype(np.array([], dtype=np.datetime64))
assert com.is_int_or_datetime_dtype(np.array([], dtype=np.timedelta64))
def test_is_datetime64_any_dtype():
assert not com.is_datetime64_any_dtype(int)
assert not com.is_datetime64_any_dtype(str)
assert not com.is_datetime64_any_dtype(np.array([1, 2]))
assert not com.is_datetime64_any_dtype(np.array(['a', 'b']))
assert com.is_datetime64_any_dtype(np.datetime64)
assert com.is_datetime64_any_dtype(np.array([], dtype=np.datetime64))
assert com.is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern"))
assert com.is_datetime64_any_dtype(pd.DatetimeIndex([1, 2, 3],
dtype=np.datetime64))
def test_is_datetime64_ns_dtype():
assert not com.is_datetime64_ns_dtype(int)
assert not com.is_datetime64_ns_dtype(str)
assert not com.is_datetime64_ns_dtype(np.datetime64)
assert not com.is_datetime64_ns_dtype(np.array([1, 2]))
assert not com.is_datetime64_ns_dtype(np.array(['a', 'b']))
assert not com.is_datetime64_ns_dtype(np.array([], dtype=np.datetime64))
# This datetime array has the wrong unit (ps instead of ns)
assert not com.is_datetime64_ns_dtype(np.array([], dtype="datetime64[ps]"))
assert com.is_datetime64_ns_dtype(DatetimeTZDtype("ns", "US/Eastern"))
assert com.is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3],
dtype=np.datetime64))
def test_is_timedelta64_ns_dtype():
assert not com.is_timedelta64_ns_dtype(np.dtype('m8[ps]'))
assert not com.is_timedelta64_ns_dtype(
np.array([1, 2], dtype=np.timedelta64))
assert com.is_timedelta64_ns_dtype(np.dtype('m8[ns]'))
assert com.is_timedelta64_ns_dtype(np.array([1, 2], dtype='m8[ns]'))
def test_is_datetime_or_timedelta_dtype():
assert not com.is_datetime_or_timedelta_dtype(int)
assert not com.is_datetime_or_timedelta_dtype(str)
assert not com.is_datetime_or_timedelta_dtype(pd.Series([1, 2]))
assert not com.is_datetime_or_timedelta_dtype(np.array(['a', 'b']))
assert com.is_datetime_or_timedelta_dtype(np.datetime64)
assert com.is_datetime_or_timedelta_dtype(np.timedelta64)
assert com.is_datetime_or_timedelta_dtype(
np.array([], dtype=np.timedelta64))
assert com.is_datetime_or_timedelta_dtype(
np.array([], dtype=np.datetime64))
def test_is_numeric_v_string_like():
assert not com.is_numeric_v_string_like(1, 1)
assert not com.is_numeric_v_string_like(1, "foo")
assert not com.is_numeric_v_string_like("foo", "foo")
assert not com.is_numeric_v_string_like(np.array([1]), np.array([2]))
assert not com.is_numeric_v_string_like(
np.array(["foo"]), np.array(["foo"]))
assert com.is_numeric_v_string_like(np.array([1]), "foo")
assert com.is_numeric_v_string_like("foo", np.array([1]))
assert com.is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"]))
assert com.is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2]))
def test_is_datetimelike_v_numeric():
dt = np.datetime64(pd.datetime(2017, 1, 1))
assert not com.is_datetimelike_v_numeric(1, 1)
assert not com.is_datetimelike_v_numeric(dt, dt)
assert not com.is_datetimelike_v_numeric(np.array([1]), np.array([2]))
assert not com.is_datetimelike_v_numeric(np.array([dt]), np.array([dt]))
assert com.is_datetimelike_v_numeric(1, dt)
assert com.is_datetimelike_v_numeric(1, dt)
assert com.is_datetimelike_v_numeric(np.array([dt]), 1)
assert com.is_datetimelike_v_numeric(np.array([1]), dt)
assert com.is_datetimelike_v_numeric(np.array([dt]), np.array([1]))
def test_is_datetimelike_v_object():
obj = object()
dt = np.datetime64(pd.datetime(2017, 1, 1))
assert not com.is_datetimelike_v_object(dt, dt)
assert not com.is_datetimelike_v_object(obj, obj)
assert not com.is_datetimelike_v_object(np.array([dt]), np.array([1]))
assert not com.is_datetimelike_v_object(np.array([dt]), np.array([dt]))
assert not com.is_datetimelike_v_object(np.array([obj]), np.array([obj]))
assert com.is_datetimelike_v_object(dt, obj)
assert com.is_datetimelike_v_object(obj, dt)
assert com.is_datetimelike_v_object(np.array([dt]), obj)
assert com.is_datetimelike_v_object(np.array([obj]), dt)
assert com.is_datetimelike_v_object(np.array([dt]), np.array([obj]))
def test_needs_i8_conversion():
assert not com.needs_i8_conversion(str)
assert not com.needs_i8_conversion(np.int64)
assert not com.needs_i8_conversion(pd.Series([1, 2]))
assert not com.needs_i8_conversion(np.array(['a', 'b']))
assert com.needs_i8_conversion(np.datetime64)
assert com.needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]"))
assert com.needs_i8_conversion(pd.DatetimeIndex(
[1, 2, 3], tz="US/Eastern"))
def test_is_numeric_dtype():
assert not com.is_numeric_dtype(str)
assert not com.is_numeric_dtype(np.datetime64)
assert not com.is_numeric_dtype(np.timedelta64)
assert not com.is_numeric_dtype(np.array(['a', 'b']))
assert not com.is_numeric_dtype(np.array([], dtype=np.timedelta64))
assert com.is_numeric_dtype(int)
assert com.is_numeric_dtype(float)
assert com.is_numeric_dtype(np.uint64)
assert com.is_numeric_dtype(pd.Series([1, 2]))
assert com.is_numeric_dtype(pd.Index([1, 2.]))
def test_is_string_like_dtype():
assert not com.is_string_like_dtype(object)
assert not com.is_string_like_dtype(pd.Series([1, 2]))
assert com.is_string_like_dtype(str)
assert com.is_string_like_dtype(np.array(['a', 'b']))
def test_is_float_dtype():
assert not com.is_float_dtype(str)
assert not com.is_float_dtype(int)
assert not com.is_float_dtype(pd.Series([1, 2]))
assert not com.is_float_dtype(np.array(['a', 'b']))
assert com.is_float_dtype(float)
assert com.is_float_dtype(pd.Index([1, 2.]))
def test_is_bool_dtype():
assert not com.is_bool_dtype(int)
assert not com.is_bool_dtype(str)
assert not com.is_bool_dtype(pd.Series([1, 2]))
assert not com.is_bool_dtype(np.array(['a', 'b']))
assert com.is_bool_dtype(bool)
assert com.is_bool_dtype(np.bool)
assert com.is_bool_dtype(np.array([True, False]))
def test_is_extension_type():
assert not com.is_extension_type([1, 2, 3])
assert not com.is_extension_type(np.array([1, 2, 3]))
assert not com.is_extension_type(pd.DatetimeIndex([1, 2, 3]))
cat = pd.Categorical([1, 2, 3])
assert com.is_extension_type(cat)
assert com.is_extension_type(pd.Series(cat))
assert com.is_extension_type(pd.SparseArray([1, 2, 3]))
assert com.is_extension_type(pd.SparseSeries([1, 2, 3]))
assert com.is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
dtype = DatetimeTZDtype("ns", tz="US/Eastern")
s = pd.Series([], dtype=dtype)
assert com.is_extension_type(s)
# This test will only skip if the previous assertions
# pass AND scipy is not installed.
sparse = pytest.importorskip("scipy.sparse")
assert not com.is_extension_type(sparse.bsr_matrix([1, 2, 3]))
def test_is_complex_dtype():
assert not com.is_complex_dtype(int)
assert not com.is_complex_dtype(str)
assert not com.is_complex_dtype(pd.Series([1, 2]))
assert not com.is_complex_dtype(np.array(['a', 'b']))
assert com.is_complex_dtype(np.complex)
assert com.is_complex_dtype(np.array([1 + 1j, 5]))
| mit |
gajduk/network-inference-from-short-time-series-gajduk | src/methods/symbolic.py | 1 | 1549 | from itertools import permutations
from numpy import argsort, matrix, zeros
from sklearn.metrics import normalized_mutual_info_score
def one_pair_at_a_time_symbol(method):
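    # Decorator: wraps a pairwise scoring `method` so that it is applied to
    # every ordered pair of rows of `instance.x` (an n_nodes x n_time_points
    # matrix), after converting each row into a symbol sequence, and returns
    # an n_nodes x n_nodes matrix of the resulting scores.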
def inner(instance):
x = matrix(instance.x)
n_nodes, n_time_points = x.shape
if n_time_points > 12:
delta = 6
else:
delta = int(n_time_points / 2)
res = zeros((n_nodes, n_nodes))
s, lenP = convertToSymbolSequence(x, delta)
for idx_node1 in range(n_nodes):
for idx_node2 in range(n_nodes):
res[idx_node1][idx_node2] = method(s[idx_node1, :].tolist()[0], s[idx_node2, :].tolist()[0], lenP)
return res
return inner
def chooseK(items, k):
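    # Recursively yields every k-element combination of `items`, preserving
    # their original order (similar to itertools.combinations(items, k)).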
if k == 0:
yield []
else:
for i in xrange(len(items)):
for cc in chooseK(items[i + 1:], k - 1):
yield [items[i]] + cc
def convertToSymbolSequence(x, delta):
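    # Encodes each row of `x` as a sequence of ordinal-pattern symbols: every
    # `delta`-element combination of the row is mapped, via the rank order of
    # its values (argsort), to the index of the matching permutation in `P`.
    # Returns the matrix of symbol sequences together with the number of
    # possible symbols, len(P) == factorial(delta).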
res = []
l = range(delta)
P = {tuple(permutation): i for i, permutation in enumerate(permutations(l))}
for x_row in x:
symbols = [P[tuple(argsort(temp).tolist())] for temp in chooseK(x_row.tolist()[0], delta)]
res.append(symbols)
return matrix(res), len(P)
@one_pair_at_a_time_symbol
def symbolSequenceSimilarity(s1, s2, lenP):
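    # p1 counts positions where the two symbol sequences coincide; p2 counts
    # positions where s1 equals lenP - s2, apparently intended to capture
    # reversed (anti-correlated) ordinal patterns. The similarity is the
    # larger of the two counts divided by the sequence length.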
p1, p2 = 0, 0
for i in range(len(s1)):
p1 += 1 if s1[i] == s2[i] else 0
p2 += 1 if s1[i] == lenP - s2[i] else 0
return max(p1, p2) * 1.0 / len(s1)
@one_pair_at_a_time_symbol
def mutualInformationOfSymbols(s1, s2, lenP):
return normalized_mutual_info_score(s1, s2)
if __name__ == "__main__":
    # The decorated methods expect an object exposing the time series as `.x`.
    instance = type("Instance", (object,), {})()
    instance.x = matrix([[0.5, 0.7, 0.4, 0.6], [.1, .5, .7, .0]])
    print mutualInformationOfSymbols(instance)
| mit |
bykoianko/omim | tools/python/booking_hotels_quality.py | 20 | 2632 | #!/usr/bin/env python
# coding: utf8
from __future__ import print_function
from collections import namedtuple, defaultdict
from datetime import datetime
from sklearn import metrics
import argparse
import base64
import json
import logging
import matplotlib.pyplot as plt
import os
import pickle
import time
import urllib2
import re
# init logging
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] %(levelname)s: %(message)s')
def load_binary_list(path):
"""Loads reference binary classifier output. """
bits = []
with open(path, 'r') as fd:
for line in fd:
if (not line.strip()) or line.startswith('#'):
continue
bits.append(1 if line.startswith('y') else 0)
return bits
def load_score_list(path):
"""Loads list of matching scores. """
scores = []
with open(path, 'r') as fd:
for line in fd:
if (not line.strip()) or line.startswith('#'):
continue
scores.append(float(re.search(r'result score: (\d*\.\d+)', line).group(1)))
return scores
def process_options():
# TODO(mgsergio): Fix description.
parser = argparse.ArgumentParser(description="Download and process booking hotels.")
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose")
parser.add_argument("-q", "--quiet", action="store_false", dest="verbose")
parser.add_argument("--reference_list", dest="reference_list", help="Path to data files")
parser.add_argument("--sample_list", dest="sample_list", help="Name and destination for output file")
parser.add_argument("--show", dest="show", default=False, action="store_true",
help="Show graph for precision and recall")
options = parser.parse_args()
if not options.reference_list or not options.sample_list:
parser.print_help()
exit()
return options
def main():
options = process_options()
reference = load_binary_list(options.reference_list)
sample = load_score_list(options.sample_list)
precision, recall, threshold = metrics.precision_recall_curve(reference, sample)
aa = zip(precision, recall, threshold)
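    # Pick the threshold that maximizes precision * recall / (precision +
    # recall), i.e. half the harmonic mean of precision and recall, which has
    # the same argmax as the F1 score.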
max_by_hmean = max(aa, key=lambda (p, r, t): p*r/(p+r))
print("Optimal threshold: {2} for precision: {0} and recall: {1}".format(*max_by_hmean))
print("AUC: {0}".format(metrics.roc_auc_score(reference, sample)))
if options.show:
plt.plot(recall, precision)
plt.title("Precision/Recall")
plt.ylabel("Precision")
plt.xlabel("Recall")
plt.show()
if __name__ == "__main__":
main()
| apache-2.0 |
pratapvardhan/pandas | pandas/tests/tseries/offsets/test_offsets.py | 1 | 130943 | from distutils.version import LooseVersion
from datetime import date, datetime, timedelta
import pytest
from pandas.compat import range
from pandas import compat
import numpy as np
from pandas.compat.numpy import np_datetime64_compat
from pandas.core.series import Series
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.frequencies import (get_freq_code, get_freq_str,
INVALID_FREQ_ERR_MSG)
from pandas.tseries.frequencies import _offset_map, get_offset
from pandas.core.indexes.datetimes import (
_to_m8, DatetimeIndex, _daterange_cache)
import pandas._libs.tslibs.offsets as liboffsets
from pandas._libs.tslibs.offsets import WeekDay, CacheableOffset
from pandas.tseries.offsets import (BDay, CDay, BQuarterEnd, BMonthEnd,
BusinessHour, WeekOfMonth, CBMonthEnd,
CustomBusinessHour,
CBMonthBegin, BYearEnd, MonthEnd,
MonthBegin, SemiMonthBegin, SemiMonthEnd,
BYearBegin, QuarterBegin, BQuarterBegin,
BMonthBegin, DateOffset, Week, YearBegin,
YearEnd, Day,
QuarterEnd, BusinessMonthEnd, FY5253,
Nano, Easter, FY5253Quarter,
LastWeekOfMonth, Tick)
from pandas.core.tools.datetimes import format, ole2datetime
import pandas.tseries.offsets as offsets
from pandas.io.pickle import read_pickle
from pandas._libs.tslibs import timezones
from pandas._libs.tslib import NaT, Timestamp
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.tseries.holiday import USFederalHolidayCalendar
from .common import assert_offset_equal, assert_onOffset
####
# Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
with pytest.raises(ValueError):
ole2datetime(60)
def test_to_m8():
valb = datetime(2007, 10, 1)
valu = _to_m8(valb)
assert isinstance(valu, np.datetime64)
# assert valu == np.datetime64(datetime(2007,10,1))
# def test_datetime64_box():
# valu = np.datetime64(datetime(2007,10,1))
# valb = _dt_box(valu)
# assert type(valb) == datetime
# assert valb == datetime(2007,10,1)
#####
# DateOffset Tests
#####
class Base(object):
_offset = None
d = Timestamp(datetime(2008, 1, 2))
timezones = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/Asia/Tokyo', 'dateutil/US/Pacific']
def _get_offset(self, klass, value=1, normalize=False):
# create instance from offset class
if klass is FY5253:
klass = klass(n=value, startingMonth=1, weekday=1,
variation='last', normalize=normalize)
elif klass is FY5253Quarter:
klass = klass(n=value, startingMonth=1, weekday=1,
qtr_with_extra_week=1, variation='last',
normalize=normalize)
elif klass is LastWeekOfMonth:
klass = klass(n=value, weekday=5, normalize=normalize)
elif klass is WeekOfMonth:
klass = klass(n=value, week=1, weekday=5, normalize=normalize)
elif klass is Week:
klass = klass(n=value, weekday=5, normalize=normalize)
elif klass is DateOffset:
klass = klass(days=value, normalize=normalize)
else:
try:
klass = klass(value, normalize=normalize)
except Exception:
klass = klass(normalize=normalize)
return klass
def test_apply_out_of_range(self, tz):
if self._offset is None:
return
# try to create an out-of-bounds result timestamp; if we can't create
# the offset skip
try:
if self._offset in (BusinessHour, CustomBusinessHour):
# Using 10000 in BusinessHour fails in tz check because of DST
# difference
offset = self._get_offset(self._offset, value=100000)
else:
offset = self._get_offset(self._offset, value=10000)
result = Timestamp('20080101') + offset
assert isinstance(result, datetime)
assert result.tzinfo is None
# Check tz is preserved
t = Timestamp('20080101', tz=tz)
result = t + offset
assert isinstance(result, datetime)
assert t.tzinfo == result.tzinfo
except tslib.OutOfBoundsDatetime:
raise
except (ValueError, KeyError):
# we are creating an invalid offset
# so ignore
pass
def test_offsets_compare_equal(self):
# root cause of GH#456: __ne__ was not implemented
if self._offset is None:
return
offset1 = self._offset()
offset2 = self._offset()
assert not offset1 != offset2
assert offset1 == offset2
def test_rsub(self):
if self._offset is None or not hasattr(self, "offset2"):
# i.e. skip for TestCommon and YQM subclasses that do not have
# offset2 attr
return
assert self.d - self.offset2 == (-self.offset2).apply(self.d)
def test_radd(self):
if self._offset is None or not hasattr(self, "offset2"):
# i.e. skip for TestCommon and YQM subclasses that do not have
# offset2 attr
return
assert self.d + self.offset2 == self.offset2 + self.d
def test_sub(self):
if self._offset is None or not hasattr(self, "offset2"):
# i.e. skip for TestCommon and YQM subclasses that do not have
# offset2 attr
return
off = self.offset2
with pytest.raises(Exception):
off - self.d
assert 2 * off - off == off
assert self.d - self.offset2 == self.d + self._offset(-2)
assert self.d - self.offset2 == self.d - (2 * off - off)
def testMult1(self):
if self._offset is None or not hasattr(self, "offset1"):
# i.e. skip for TestCommon and YQM subclasses that do not have
# offset1 attr
return
assert self.d + 10 * self.offset1 == self.d + self._offset(10)
assert self.d + 5 * self.offset1 == self.d + self._offset(5)
def testMult2(self):
if self._offset is None:
return
assert self.d + (-5 * self._offset(-10)) == self.d + self._offset(50)
assert self.d + (-3 * self._offset(-2)) == self.d + self._offset(6)
class TestCommon(Base):
    # expected values for offsets created by Base._get_offset,
    # applied to 2011/01/01 09:00 (a Saturday);
    # used for .apply and .rollforward
expecteds = {'Day': Timestamp('2011-01-02 09:00:00'),
'DateOffset': Timestamp('2011-01-02 09:00:00'),
'BusinessDay': Timestamp('2011-01-03 09:00:00'),
'CustomBusinessDay': Timestamp('2011-01-03 09:00:00'),
'CustomBusinessMonthEnd': Timestamp('2011-01-31 09:00:00'),
'CustomBusinessMonthBegin': Timestamp('2011-01-03 09:00:00'),
'MonthBegin': Timestamp('2011-02-01 09:00:00'),
'BusinessMonthBegin': Timestamp('2011-01-03 09:00:00'),
'MonthEnd': Timestamp('2011-01-31 09:00:00'),
'SemiMonthEnd': Timestamp('2011-01-15 09:00:00'),
'SemiMonthBegin': Timestamp('2011-01-15 09:00:00'),
'BusinessMonthEnd': Timestamp('2011-01-31 09:00:00'),
'YearBegin': Timestamp('2012-01-01 09:00:00'),
'BYearBegin': Timestamp('2011-01-03 09:00:00'),
'YearEnd': Timestamp('2011-12-31 09:00:00'),
'BYearEnd': Timestamp('2011-12-30 09:00:00'),
'QuarterBegin': Timestamp('2011-03-01 09:00:00'),
'BQuarterBegin': Timestamp('2011-03-01 09:00:00'),
'QuarterEnd': Timestamp('2011-03-31 09:00:00'),
'BQuarterEnd': Timestamp('2011-03-31 09:00:00'),
'BusinessHour': Timestamp('2011-01-03 10:00:00'),
'CustomBusinessHour': Timestamp('2011-01-03 10:00:00'),
'WeekOfMonth': Timestamp('2011-01-08 09:00:00'),
'LastWeekOfMonth': Timestamp('2011-01-29 09:00:00'),
'FY5253Quarter': Timestamp('2011-01-25 09:00:00'),
'FY5253': Timestamp('2011-01-25 09:00:00'),
'Week': Timestamp('2011-01-08 09:00:00'),
'Easter': Timestamp('2011-04-24 09:00:00'),
'Hour': Timestamp('2011-01-01 10:00:00'),
'Minute': Timestamp('2011-01-01 09:01:00'),
'Second': Timestamp('2011-01-01 09:00:01'),
'Milli': Timestamp('2011-01-01 09:00:00.001000'),
'Micro': Timestamp('2011-01-01 09:00:00.000001'),
'Nano': Timestamp(np_datetime64_compat(
'2011-01-01T09:00:00.000000001Z'))}
def test_immutable(self, offset_types):
# GH#21341 check that __setattr__ raises
offset = self._get_offset(offset_types)
with pytest.raises(AttributeError):
offset.normalize = True
with pytest.raises(AttributeError):
offset.n = 91
def test_return_type(self, offset_types):
offset = self._get_offset(offset_types)
# make sure that we are returning a Timestamp
result = Timestamp('20080101') + offset
assert isinstance(result, Timestamp)
# make sure that we are returning NaT
assert NaT + offset is NaT
assert offset + NaT is NaT
assert NaT - offset is NaT
assert (-offset).apply(NaT) is NaT
def test_offset_n(self, offset_types):
offset = self._get_offset(offset_types)
assert offset.n == 1
neg_offset = offset * -1
assert neg_offset.n == -1
mul_offset = offset * 3
assert mul_offset.n == 3
def test_offset_freqstr(self, offset_types):
offset = self._get_offset(offset_types)
freqstr = offset.freqstr
if freqstr not in ('<Easter>',
"<DateOffset: days=1>",
'LWOM-SAT', ):
code = get_offset(freqstr)
assert offset.rule_code == code
def _check_offsetfunc_works(self, offset, funcname, dt, expected,
normalize=False):
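        # Checks that calling `funcname` (e.g. 'apply', 'rollforward',
        # 'rollback') on the offset returns `expected` both for the raw `dt`
        # (a datetime or np.datetime64) and for Timestamp(dt), that
        # nanoseconds are preserved, and, when `dt` is not an np.datetime64,
        # that timezone-aware inputs behave consistently.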
if normalize and issubclass(offset, Tick):
# normalize=True disallowed for Tick subclasses GH#21427
return
offset_s = self._get_offset(offset, normalize=normalize)
func = getattr(offset_s, funcname)
result = func(dt)
assert isinstance(result, Timestamp)
assert result == expected
result = func(Timestamp(dt))
assert isinstance(result, Timestamp)
assert result == expected
# see gh-14101
exp_warning = None
ts = Timestamp(dt) + Nano(5)
if (offset_s.__class__.__name__ == 'DateOffset' and
(funcname == 'apply' or normalize) and
ts.nanosecond > 0):
exp_warning = UserWarning
# test nanosecond is preserved
with tm.assert_produces_warning(exp_warning,
check_stacklevel=False):
result = func(ts)
assert isinstance(result, Timestamp)
if normalize is False:
assert result == expected + Nano(5)
else:
assert result == expected
if isinstance(dt, np.datetime64):
# test tz when input is datetime or Timestamp
return
for tz in self.timezones:
expected_localize = expected.tz_localize(tz)
tz_obj = timezones.maybe_get_tz(tz)
dt_tz = conversion.localize_pydatetime(dt, tz_obj)
result = func(dt_tz)
assert isinstance(result, Timestamp)
assert result == expected_localize
result = func(Timestamp(dt, tz=tz))
assert isinstance(result, Timestamp)
assert result == expected_localize
# see gh-14101
exp_warning = None
ts = Timestamp(dt, tz=tz) + Nano(5)
if (offset_s.__class__.__name__ == 'DateOffset' and
(funcname == 'apply' or normalize) and
ts.nanosecond > 0):
exp_warning = UserWarning
# test nanosecond is preserved
with tm.assert_produces_warning(exp_warning,
check_stacklevel=False):
result = func(ts)
assert isinstance(result, Timestamp)
if normalize is False:
assert result == expected_localize + Nano(5)
else:
assert result == expected_localize
def test_apply(self, offset_types):
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np_datetime64_compat('2011-01-01 09:00Z')
for dt in [sdt, ndt]:
expected = self.expecteds[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'apply', dt, expected)
expected = Timestamp(expected.date())
self._check_offsetfunc_works(offset_types, 'apply', dt, expected,
normalize=True)
def test_rollforward(self, offset_types):
expecteds = self.expecteds.copy()
# result will not be changed if the target is on the offset
no_changes = ['Day', 'MonthBegin', 'SemiMonthBegin', 'YearBegin',
'Week', 'Hour', 'Minute', 'Second', 'Milli', 'Micro',
'Nano', 'DateOffset']
for n in no_changes:
expecteds[n] = Timestamp('2011/01/01 09:00')
expecteds['BusinessHour'] = Timestamp('2011-01-03 09:00:00')
expecteds['CustomBusinessHour'] = Timestamp('2011-01-03 09:00:00')
# but be changed when normalize=True
norm_expected = expecteds.copy()
for k in norm_expected:
norm_expected[k] = Timestamp(norm_expected[k].date())
normalized = {'Day': Timestamp('2011-01-02 00:00:00'),
'DateOffset': Timestamp('2011-01-02 00:00:00'),
'MonthBegin': Timestamp('2011-02-01 00:00:00'),
'SemiMonthBegin': Timestamp('2011-01-15 00:00:00'),
'YearBegin': Timestamp('2012-01-01 00:00:00'),
'Week': Timestamp('2011-01-08 00:00:00'),
'Hour': Timestamp('2011-01-01 00:00:00'),
'Minute': Timestamp('2011-01-01 00:00:00'),
'Second': Timestamp('2011-01-01 00:00:00'),
'Milli': Timestamp('2011-01-01 00:00:00'),
'Micro': Timestamp('2011-01-01 00:00:00')}
norm_expected.update(normalized)
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np_datetime64_compat('2011-01-01 09:00Z')
for dt in [sdt, ndt]:
expected = expecteds[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'rollforward', dt,
expected)
expected = norm_expected[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'rollforward', dt,
expected, normalize=True)
def test_rollback(self, offset_types):
expecteds = {'BusinessDay': Timestamp('2010-12-31 09:00:00'),
'CustomBusinessDay': Timestamp('2010-12-31 09:00:00'),
'CustomBusinessMonthEnd':
Timestamp('2010-12-31 09:00:00'),
'CustomBusinessMonthBegin':
Timestamp('2010-12-01 09:00:00'),
'BusinessMonthBegin': Timestamp('2010-12-01 09:00:00'),
'MonthEnd': Timestamp('2010-12-31 09:00:00'),
'SemiMonthEnd': Timestamp('2010-12-31 09:00:00'),
'BusinessMonthEnd': Timestamp('2010-12-31 09:00:00'),
'BYearBegin': Timestamp('2010-01-01 09:00:00'),
'YearEnd': Timestamp('2010-12-31 09:00:00'),
'BYearEnd': Timestamp('2010-12-31 09:00:00'),
'QuarterBegin': Timestamp('2010-12-01 09:00:00'),
'BQuarterBegin': Timestamp('2010-12-01 09:00:00'),
'QuarterEnd': Timestamp('2010-12-31 09:00:00'),
'BQuarterEnd': Timestamp('2010-12-31 09:00:00'),
'BusinessHour': Timestamp('2010-12-31 17:00:00'),
'CustomBusinessHour': Timestamp('2010-12-31 17:00:00'),
'WeekOfMonth': Timestamp('2010-12-11 09:00:00'),
'LastWeekOfMonth': Timestamp('2010-12-25 09:00:00'),
'FY5253Quarter': Timestamp('2010-10-26 09:00:00'),
'FY5253': Timestamp('2010-01-26 09:00:00'),
'Easter': Timestamp('2010-04-04 09:00:00')}
# result will not be changed if the target is on the offset
for n in ['Day', 'MonthBegin', 'SemiMonthBegin', 'YearBegin', 'Week',
'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano',
'DateOffset']:
expecteds[n] = Timestamp('2011/01/01 09:00')
# but be changed when normalize=True
norm_expected = expecteds.copy()
for k in norm_expected:
norm_expected[k] = Timestamp(norm_expected[k].date())
normalized = {'Day': Timestamp('2010-12-31 00:00:00'),
'DateOffset': Timestamp('2010-12-31 00:00:00'),
'MonthBegin': Timestamp('2010-12-01 00:00:00'),
'SemiMonthBegin': Timestamp('2010-12-15 00:00:00'),
'YearBegin': Timestamp('2010-01-01 00:00:00'),
'Week': Timestamp('2010-12-25 00:00:00'),
'Hour': Timestamp('2011-01-01 00:00:00'),
'Minute': Timestamp('2011-01-01 00:00:00'),
'Second': Timestamp('2011-01-01 00:00:00'),
'Milli': Timestamp('2011-01-01 00:00:00'),
'Micro': Timestamp('2011-01-01 00:00:00')}
norm_expected.update(normalized)
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np_datetime64_compat('2011-01-01 09:00Z')
for dt in [sdt, ndt]:
expected = expecteds[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'rollback', dt,
expected)
expected = norm_expected[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'rollback', dt,
expected, normalize=True)
def test_onOffset(self, offset_types):
dt = self.expecteds[offset_types.__name__]
offset_s = self._get_offset(offset_types)
assert offset_s.onOffset(dt)
# when normalize=True, onOffset checks time is 00:00:00
if issubclass(offset_types, Tick):
# normalize=True disallowed for Tick subclasses GH#21427
return
offset_n = self._get_offset(offset_types, normalize=True)
assert not offset_n.onOffset(dt)
if offset_types in (BusinessHour, CustomBusinessHour):
# In default BusinessHour (9:00-17:00), normalized time
# cannot be in business hour range
return
date = datetime(dt.year, dt.month, dt.day)
assert offset_n.onOffset(date)
def test_add(self, offset_types, tz):
dt = datetime(2011, 1, 1, 9, 0)
offset_s = self._get_offset(offset_types)
expected = self.expecteds[offset_types.__name__]
result_dt = dt + offset_s
result_ts = Timestamp(dt) + offset_s
for result in [result_dt, result_ts]:
assert isinstance(result, Timestamp)
assert result == expected
expected_localize = expected.tz_localize(tz)
result = Timestamp(dt, tz=tz) + offset_s
assert isinstance(result, Timestamp)
assert result == expected_localize
# normalize=True, disallowed for Tick subclasses GH#21427
if issubclass(offset_types, Tick):
return
offset_s = self._get_offset(offset_types, normalize=True)
expected = Timestamp(expected.date())
result_dt = dt + offset_s
result_ts = Timestamp(dt) + offset_s
for result in [result_dt, result_ts]:
assert isinstance(result, Timestamp)
assert result == expected
expected_localize = expected.tz_localize(tz)
result = Timestamp(dt, tz=tz) + offset_s
assert isinstance(result, Timestamp)
assert result == expected_localize
def test_pickle_v0_15_2(self, datapath):
offsets = {'DateOffset': DateOffset(years=1),
'MonthBegin': MonthBegin(1),
'Day': Day(1),
'YearBegin': YearBegin(1),
'Week': Week(1)}
pickle_path = datapath('tseries', 'offsets', 'data',
'dateoffset_0_15_2.pickle')
# This code was executed once on v0.15.2 to generate the pickle:
# with open(pickle_path, 'wb') as f: pickle.dump(offsets, f)
#
tm.assert_dict_equal(offsets, read_pickle(pickle_path))
class TestDateOffset(Base):
def setup_method(self, method):
self.d = Timestamp(datetime(2008, 1, 2))
_offset_map.clear()
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert ((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert ((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert ((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert ((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert (DateOffset(months=2).copy() == DateOffset(months=2))
def test_eq(self):
offset1 = DateOffset(days=1)
offset2 = DateOffset(days=365)
assert offset1 != offset2
class TestBusinessDay(Base):
_offset = BDay
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset1 = self.offset
self.offset2 = BDay(2)
def test_different_normalize_equals(self):
        # GH#21404 changed __eq__ to return False when `normalize` doesn't match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == '<BusinessDay>'
assert repr(self.offset2) == '<2 * BusinessDays>'
if compat.PY37:
expected = '<BusinessDay: offset=datetime.timedelta(days=1)>'
else:
expected = '<BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_call(self):
assert self.offset2(self.d) == datetime(2008, 1, 3)
def testRollback1(self):
assert BDay(10).rollback(self.d) == self.d
def testRollback2(self):
assert (BDay(10).rollback(datetime(2008, 1, 5)) ==
datetime(2008, 1, 4))
def testRollforward1(self):
assert BDay(10).rollforward(self.d) == self.d
def testRollforward2(self):
assert (BDay(10).rollforward(datetime(2008, 1, 5)) ==
datetime(2008, 1, 7))
def test_roll_date_object(self):
offset = BDay()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 14)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 17)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, d, expected in tests:
assert_onOffset(offset, d, expected)
apply_cases = []
apply_cases.append((BDay(), {
datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
apply_cases.append((2 * BDay(), {
datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
apply_cases.append((-BDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
apply_cases.append((-2 * BDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
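# n=0 acts as a roll-forward: a date already on a business day is left
# unchanged, while weekend dates move to the next business day (see the
# BDay(0) cases below)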
apply_cases.append((BDay(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + BDay(10)
assert result == datetime(2012, 11, 6)
result = dt + BDay(100) - BDay(100)
assert result == dt
off = BDay() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2011, 12, 26)
assert rs == xp
off = BDay() * 10
rs = datetime(2014, 1, 5) + off # see #5890
xp = datetime(2014, 1, 17)
assert rs == xp
def test_apply_corner(self):
pytest.raises(TypeError, BDay().apply, BMonthEnd())
class TestBusinessHour(Base):
_offset = BusinessHour
def setup_method(self, method):
self.d = datetime(2014, 7, 1, 10, 00)
self.offset1 = BusinessHour()
self.offset2 = BusinessHour(n=3)
self.offset3 = BusinessHour(n=-1)
self.offset4 = BusinessHour(n=-4)
from datetime import time as dt_time
self.offset5 = BusinessHour(start=dt_time(11, 0), end=dt_time(14, 30))
self.offset6 = BusinessHour(start='20:00', end='05:00')
self.offset7 = BusinessHour(n=-2, start=dt_time(21, 30),
end=dt_time(6, 30))
def test_constructor_errors(self):
from datetime import time as dt_time
with pytest.raises(ValueError):
BusinessHour(start=dt_time(11, 0, 5))
with pytest.raises(ValueError):
BusinessHour(start='AAA')
with pytest.raises(ValueError):
BusinessHour(start='14:00:05')
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` doesn't match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset1) == '<BusinessHour: BH=09:00-17:00>'
assert repr(self.offset2) == '<3 * BusinessHours: BH=09:00-17:00>'
assert repr(self.offset3) == '<-1 * BusinessHour: BH=09:00-17:00>'
assert repr(self.offset4) == '<-4 * BusinessHours: BH=09:00-17:00>'
assert repr(self.offset5) == '<BusinessHour: BH=11:00-14:30>'
assert repr(self.offset6) == '<BusinessHour: BH=20:00-05:00>'
assert repr(self.offset7) == '<-2 * BusinessHours: BH=21:30-06:30>'
def test_with_offset(self):
expected = Timestamp('2014-07-01 13:00')
assert self.d + BusinessHour() * 3 == expected
assert self.d + BusinessHour(n=3) == expected
def test_eq(self):
for offset in [self.offset1, self.offset2, self.offset3, self.offset4]:
assert offset == offset
assert BusinessHour() != BusinessHour(-1)
assert BusinessHour(start='09:00') == BusinessHour()
assert BusinessHour(start='09:00') != BusinessHour(start='09:01')
assert (BusinessHour(start='09:00', end='17:00') !=
BusinessHour(start='17:00', end='09:01'))
def test_hash(self):
for offset in [self.offset1, self.offset2, self.offset3, self.offset4]:
assert hash(offset) == hash(offset)
def test_call(self):
assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
assert self.offset2(self.d) == datetime(2014, 7, 1, 13)
assert self.offset3(self.d) == datetime(2014, 6, 30, 17)
assert self.offset4(self.d) == datetime(2014, 6, 30, 14)
def test_sub(self):
# we have to override test_sub here because self.offset2 is not
# defined as self._offset(2)
off = self.offset2
with pytest.raises(Exception):
off - self.d
assert 2 * off - off == off
assert self.d - self.offset2 == self.d + self._offset(-3)
def testRollback1(self):
assert self.offset1.rollback(self.d) == self.d
assert self.offset2.rollback(self.d) == self.d
assert self.offset3.rollback(self.d) == self.d
assert self.offset4.rollback(self.d) == self.d
assert self.offset5.rollback(self.d) == datetime(2014, 6, 30, 14, 30)
assert self.offset6.rollback(self.d) == datetime(2014, 7, 1, 5, 0)
assert self.offset7.rollback(self.d) == datetime(2014, 7, 1, 6, 30)
d = datetime(2014, 7, 1, 0)
assert self.offset1.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset2.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset3.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset4.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset5.rollback(d) == datetime(2014, 6, 30, 14, 30)
assert self.offset6.rollback(d) == d
assert self.offset7.rollback(d) == d
assert self._offset(5).rollback(self.d) == self.d
def testRollback2(self):
assert (self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) ==
datetime(2014, 7, 4, 17, 0))
def testRollforward1(self):
assert self.offset1.rollforward(self.d) == self.d
assert self.offset2.rollforward(self.d) == self.d
assert self.offset3.rollforward(self.d) == self.d
assert self.offset4.rollforward(self.d) == self.d
assert (self.offset5.rollforward(self.d) ==
datetime(2014, 7, 1, 11, 0))
assert (self.offset6.rollforward(self.d) ==
datetime(2014, 7, 1, 20, 0))
assert (self.offset7.rollforward(self.d) ==
datetime(2014, 7, 1, 21, 30))
d = datetime(2014, 7, 1, 0)
assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset3.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset4.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset5.rollforward(d) == datetime(2014, 7, 1, 11)
assert self.offset6.rollforward(d) == d
assert self.offset7.rollforward(d) == d
assert self._offset(5).rollforward(self.d) == self.d
def testRollforward2(self):
assert (self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) ==
datetime(2014, 7, 7, 9))
def test_roll_date_object(self):
offset = BusinessHour()
dt = datetime(2014, 7, 6, 15, 0)
result = offset.rollback(dt)
assert result == datetime(2014, 7, 4, 17)
result = offset.rollforward(dt)
assert result == datetime(2014, 7, 7, 9)
normalize_cases = []
normalize_cases.append((BusinessHour(normalize=True), {
datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
datetime(2014, 7, 6, 10): datetime(2014, 7, 7)}))
normalize_cases.append((BusinessHour(-1, normalize=True), {
datetime(2014, 7, 1, 8): datetime(2014, 6, 30),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
datetime(2014, 7, 1, 10): datetime(2014, 6, 30),
datetime(2014, 7, 1, 0): datetime(2014, 6, 30),
datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
datetime(2014, 7, 6, 10): datetime(2014, 7, 4)}))
normalize_cases.append((BusinessHour(1, normalize=True, start='17:00',
end='04:00'), {
datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
datetime(2014, 7, 2, 3): datetime(2014, 7, 2),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
datetime(2014, 7, 7, 17): datetime(2014, 7, 7)}))
@pytest.mark.parametrize('case', normalize_cases)
def test_normalize(self, case):
offset, cases = case
for dt, expected in compat.iteritems(cases):
assert offset.apply(dt) == expected
on_offset_cases = []
on_offset_cases.append((BusinessHour(), {
datetime(2014, 7, 1, 9): True,
datetime(2014, 7, 1, 8, 59): False,
datetime(2014, 7, 1, 8): False,
datetime(2014, 7, 1, 17): True,
datetime(2014, 7, 1, 17, 1): False,
datetime(2014, 7, 1, 18): False,
datetime(2014, 7, 5, 9): False,
datetime(2014, 7, 6, 12): False}))
on_offset_cases.append((BusinessHour(start='10:00', end='15:00'), {
datetime(2014, 7, 1, 9): False,
datetime(2014, 7, 1, 10): True,
datetime(2014, 7, 1, 15): True,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12): False,
datetime(2014, 7, 6, 12): False}))
on_offset_cases.append((BusinessHour(start='19:00', end='05:00'), {
datetime(2014, 7, 1, 9, 0): False,
datetime(2014, 7, 1, 10, 0): False,
datetime(2014, 7, 1, 15): False,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12, 0): False,
datetime(2014, 7, 6, 12, 0): False,
datetime(2014, 7, 1, 19, 0): True,
datetime(2014, 7, 2, 0, 0): True,
datetime(2014, 7, 4, 23): True,
datetime(2014, 7, 5, 1): True,
datetime(2014, 7, 5, 5, 0): True,
datetime(2014, 7, 6, 23, 0): False,
datetime(2014, 7, 7, 3, 0): False}))
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, cases = case
for dt, expected in compat.iteritems(cases):
assert offset.onOffset(dt) == expected
opening_time_cases = []
# the next/previous opening time should depend only on the sign of n,
# not on its magnitude, and should be unaffected by `end`
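# For example, per the first case below with BusinessHour() (09:00-17:00):
# _next_opening_time(2014-07-01 11:00) -> 2014-07-02 09:00 and
# _prev_opening_time(2014-07-01 11:00) -> 2014-07-01 09:00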
opening_time_cases.append(([BusinessHour(), BusinessHour(n=2),
BusinessHour(n=4), BusinessHour(end='10:00'),
BusinessHour(n=2, end='4:00'),
BusinessHour(n=4, end='15:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
# if the timestamp falls exactly on an opening time, the next opening
# time is that same timestamp
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 3, 9),
datetime(2014, 7, 2, 9)),
# 2014-07-05 is a Saturday
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 8, 9),
datetime(2014, 7, 7, 9))}))
opening_time_cases.append(([BusinessHour(start='11:15'),
BusinessHour(n=2, start='11:15'),
BusinessHour(n=3, start='11:15'),
BusinessHour(start='11:15', end='10:00'),
BusinessHour(n=2, start='11:15', end='4:00'),
BusinessHour(n=3, start='11:15',
end='15:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 11, 15),
datetime(2014, 6, 30, 11, 15)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 11, 15): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 2, 11, 15)),
datetime(2014, 7, 2, 11, 15, 1): (datetime(2014, 7, 3, 11, 15),
datetime(2014, 7, 2, 11, 15)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 11, 15),
datetime(2014, 7, 3, 11, 15)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15))}))
opening_time_cases.append(([BusinessHour(-1), BusinessHour(n=-2),
BusinessHour(n=-4),
BusinessHour(n=-1, end='10:00'),
BusinessHour(n=-2, end='4:00'),
BusinessHour(n=-4, end='15:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 3, 9)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 9): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 8, 9))}))
opening_time_cases.append(([BusinessHour(start='17:00', end='05:00'),
BusinessHour(n=3, start='17:00',
end='03:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 17),
datetime(2014, 6, 30, 17)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 4, 17): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 3, 17)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 7, 17, 1): (datetime(2014, 7, 8, 17),
datetime(2014, 7, 7, 17)), }))
opening_time_cases.append(([BusinessHour(-1, start='17:00', end='05:00'),
BusinessHour(n=-2, start='17:00',
end='03:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 16, 59): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 3, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 7, 18): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 8, 17))}))
@pytest.mark.parametrize('case', opening_time_cases)
def test_opening_time(self, case):
_offsets, cases = case
for offset in _offsets:
for dt, (exp_next, exp_prev) in compat.iteritems(cases):
assert offset._next_opening_time(dt) == exp_next
assert offset._prev_opening_time(dt) == exp_prev
apply_cases = []
apply_cases.append((BusinessHour(), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 2, 9, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 10),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 12),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
# Saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30)}))
apply_cases.append((BusinessHour(4), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30)}))
apply_cases.append((BusinessHour(-1), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 10),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 10): datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
datetime(2014, 7, 1, 9, 30, 15): datetime(2014, 6, 30, 16, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 5): datetime(2014, 6, 30, 16),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 10),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 16),
datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 16),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 16),
# Saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 16),
datetime(2014, 7, 7, 9): datetime(2014, 7, 4, 16),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 16, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 16, 30, 30)}))
apply_cases.append((BusinessHour(-4), {
datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 15),
datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 13),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 13, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 13, 30, 30)}))
apply_cases.append((BusinessHour(start='13:00', end='16:00'), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 13),
datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 14),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 14),
datetime(2014, 7, 1, 15, 30, 15): datetime(2014, 7, 2, 13, 30, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 14),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14)}))
apply_cases.append((BusinessHour(n=2, start='13:00', end='16:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 14): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
datetime(2014, 7, 2, 14, 30): datetime(2014, 7, 3, 13, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 15),
datetime(2014, 7, 4, 14, 30): datetime(2014, 7, 7, 13, 30),
datetime(2014, 7, 4, 14, 30, 30): datetime(2014, 7, 7, 13, 30, 30)}))
apply_cases.append((BusinessHour(n=-1, start='13:00', end='16:00'), {
datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 14): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 15): datetime(2014, 7, 2, 14),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 16): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 13, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 15),
datetime(2014, 7, 7, 11): datetime(2014, 7, 4, 15)}))
apply_cases.append((BusinessHour(n=-3, start='10:00', end='16:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 11),
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 11, 30): datetime(2014, 7, 1, 14, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
datetime(2014, 7, 4, 10): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 16): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 12, 30): datetime(2014, 7, 3, 15, 30),
datetime(2014, 7, 4, 12, 30, 30): datetime(2014, 7, 3, 15, 30, 30)}))
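# business hours that wrap past midnight (start='19:00', end='05:00') are
# also covered; e.g. 2014-07-04 23:00 + BusinessHour() lands on
# 2014-07-05 00:00 in the next case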
apply_cases.append((BusinessHour(start='19:00', end='05:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 20),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 4, 30): datetime(2014, 7, 2, 19, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 1),
datetime(2014, 7, 4, 10): datetime(2014, 7, 4, 20),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5, 0),
datetime(2014, 7, 5, 0): datetime(2014, 7, 5, 1),
datetime(2014, 7, 5, 4): datetime(2014, 7, 7, 19),
datetime(2014, 7, 5, 4, 30): datetime(2014, 7, 7, 19, 30),
datetime(2014, 7, 5, 4, 30, 30): datetime(2014, 7, 7, 19, 30, 30)}))
apply_cases.append((BusinessHour(n=-1, start='19:00', end='05:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 3),
datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 5, 4, 30),
datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 5, 4, 30, 30)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
apply_large_n_cases = []
# A week later
apply_large_n_cases.append((BusinessHour(40), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 8, 11),
datetime(2014, 7, 1, 13): datetime(2014, 7, 8, 13),
datetime(2014, 7, 1, 15): datetime(2014, 7, 8, 15),
datetime(2014, 7, 1, 16): datetime(2014, 7, 8, 16),
datetime(2014, 7, 1, 17): datetime(2014, 7, 9, 9),
datetime(2014, 7, 2, 11): datetime(2014, 7, 9, 11),
datetime(2014, 7, 2, 8): datetime(2014, 7, 9, 9),
datetime(2014, 7, 2, 19): datetime(2014, 7, 10, 9),
datetime(2014, 7, 2, 23): datetime(2014, 7, 10, 9),
datetime(2014, 7, 3, 0): datetime(2014, 7, 10, 9),
datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 9),
datetime(2014, 7, 4, 18): datetime(2014, 7, 14, 9),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 14, 9, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 14, 9, 30, 30)}))
# 3 days and 1 hour before
apply_large_n_cases.append((BusinessHour(-25), {
datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 12),
datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 16),
datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 17),
datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 16),
datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 16),
datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 16),
datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 16),
datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 16),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 16, 30),
datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30)}))
# 5 days and 3 hours later
apply_large_n_cases.append((BusinessHour(28, start='21:00', end='02:00'), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
datetime(2014, 7, 2, 2): datetime(2014, 7, 10, 0),
datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
datetime(2014, 7, 4, 2): datetime(2014, 7, 12, 0),
datetime(2014, 7, 4, 3): datetime(2014, 7, 12, 0),
datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
datetime(2014, 7, 5, 15): datetime(2014, 7, 15, 0),
datetime(2014, 7, 6, 18): datetime(2014, 7, 15, 0),
datetime(2014, 7, 7, 1): datetime(2014, 7, 15, 0),
datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30)}))
@pytest.mark.parametrize('case', apply_large_n_cases)
def test_apply_large_n(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_nanoseconds(self):
tests = []
tests.append((BusinessHour(),
{Timestamp('2014-07-04 15:00') + Nano(5): Timestamp(
'2014-07-04 16:00') + Nano(5),
Timestamp('2014-07-04 16:00') + Nano(5): Timestamp(
'2014-07-07 09:00') + Nano(5),
Timestamp('2014-07-04 16:00') - Nano(5): Timestamp(
'2014-07-04 17:00') - Nano(5)}))
tests.append((BusinessHour(-1),
{Timestamp('2014-07-04 15:00') + Nano(5): Timestamp(
'2014-07-04 14:00') + Nano(5),
Timestamp('2014-07-04 10:00') + Nano(5): Timestamp(
'2014-07-04 09:00') + Nano(5),
Timestamp('2014-07-04 10:00') - Nano(5): Timestamp(
'2014-07-03 17:00') - Nano(5), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_datetimeindex(self):
idx1 = DatetimeIndex(start='2014-07-04 15:00', end='2014-07-08 10:00',
freq='BH')
idx2 = DatetimeIndex(start='2014-07-04 15:00', periods=12, freq='BH')
idx3 = DatetimeIndex(end='2014-07-08 10:00', periods=12, freq='BH')
expected = DatetimeIndex(['2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00',
'2014-07-07 10:00', '2014-07-07 11:00',
'2014-07-07 12:00',
'2014-07-07 13:00', '2014-07-07 14:00',
'2014-07-07 15:00',
'2014-07-07 16:00', '2014-07-08 09:00',
'2014-07-08 10:00'],
freq='BH')
for idx in [idx1, idx2, idx3]:
tm.assert_index_equal(idx, expected)
idx1 = DatetimeIndex(start='2014-07-04 15:45', end='2014-07-08 10:45',
freq='BH')
idx2 = DatetimeIndex(start='2014-07-04 15:45', periods=12, freq='BH')
idx3 = DatetimeIndex(end='2014-07-08 10:45', periods=12, freq='BH')
expected = DatetimeIndex(['2014-07-04 15:45', '2014-07-04 16:45',
'2014-07-07 09:45',
'2014-07-07 10:45', '2014-07-07 11:45',
'2014-07-07 12:45',
'2014-07-07 13:45', '2014-07-07 14:45',
'2014-07-07 15:45',
'2014-07-07 16:45', '2014-07-08 09:45',
'2014-07-08 10:45'],
freq='BH')
# overridden so that only mutual consistency of idx1/idx2/idx3 is checked
expected = idx1
for idx in [idx1, idx2, idx3]:
tm.assert_index_equal(idx, expected)
class TestCustomBusinessHour(Base):
_offset = CustomBusinessHour
holidays = ['2014-06-27', datetime(2014, 6, 30),
np.datetime64('2014-07-02')]
def setup_method(self, method):
# 2014 Calendar to check custom holidays
# Sun Mon Tue Wed Thu Fri Sat
# 6/22 23 24 25 26 27 28
# 29 30 7/1 2 3 4 5
# 6 7 8 9 10 11 12
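# With the holidays above, hours roll over holiday dates entirely: e.g.
# 2014-07-01 16:00 + CustomBusinessHour(holidays=holidays) lands on
# 2014-07-03 09:00, skipping the 07-02 holiday (see apply_cases below)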
self.d = datetime(2014, 7, 1, 10, 00)
self.offset1 = CustomBusinessHour(weekmask='Tue Wed Thu Fri')
self.offset2 = CustomBusinessHour(holidays=self.holidays)
def test_constructor_errors(self):
from datetime import time as dt_time
with pytest.raises(ValueError):
CustomBusinessHour(start=dt_time(11, 0, 5))
with pytest.raises(ValueError):
CustomBusinessHour(start='AAA')
with pytest.raises(ValueError):
CustomBusinessHour(start='14:00:05')
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` doesn't match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset1) == '<CustomBusinessHour: CBH=09:00-17:00>'
assert repr(self.offset2) == '<CustomBusinessHour: CBH=09:00-17:00>'
def test_with_offset(self):
expected = Timestamp('2014-07-01 13:00')
assert self.d + CustomBusinessHour() * 3 == expected
assert self.d + CustomBusinessHour(n=3) == expected
def test_eq(self):
for offset in [self.offset1, self.offset2]:
assert offset == offset
assert CustomBusinessHour() != CustomBusinessHour(-1)
assert (CustomBusinessHour(start='09:00') ==
CustomBusinessHour())
assert (CustomBusinessHour(start='09:00') !=
CustomBusinessHour(start='09:01'))
assert (CustomBusinessHour(start='09:00', end='17:00') !=
CustomBusinessHour(start='17:00', end='09:01'))
assert (CustomBusinessHour(weekmask='Tue Wed Thu Fri') !=
CustomBusinessHour(weekmask='Mon Tue Wed Thu Fri'))
assert (CustomBusinessHour(holidays=['2014-06-27']) !=
CustomBusinessHour(holidays=['2014-06-28']))
def test_sub(self):
# override the Base.test_sub implementation because self.offset2 is
# defined differently in this class than the test expects
pass
def test_hash(self):
assert hash(self.offset1) == hash(self.offset1)
assert hash(self.offset2) == hash(self.offset2)
def test_call(self):
assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
assert self.offset2(self.d) == datetime(2014, 7, 1, 11)
def testRollback1(self):
assert self.offset1.rollback(self.d) == self.d
assert self.offset2.rollback(self.d) == self.d
d = datetime(2014, 7, 1, 0)
# 2014/07/01 is a Tuesday; 06/30 (Monday) is excluded by offset1's weekmask
assert self.offset1.rollback(d) == datetime(2014, 6, 27, 17)
# 2014/6/30 and 2014/6/27 are holidays
assert self.offset2.rollback(d) == datetime(2014, 6, 26, 17)
def testRollback2(self):
assert (self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) ==
datetime(2014, 7, 4, 17, 0))
def testRollforward1(self):
assert self.offset1.rollforward(self.d) == self.d
assert self.offset2.rollforward(self.d) == self.d
d = datetime(2014, 7, 1, 0)
assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
def testRollforward2(self):
assert (self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) ==
datetime(2014, 7, 7, 9))
def test_roll_date_object(self):
offset = BusinessHour()
dt = datetime(2014, 7, 6, 15, 0)
result = offset.rollback(dt)
assert result == datetime(2014, 7, 4, 17)
result = offset.rollforward(dt)
assert result == datetime(2014, 7, 7, 9)
normalize_cases = []
normalize_cases.append((
CustomBusinessHour(normalize=True, holidays=holidays),
{datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3),
datetime(2014, 7, 1, 23): datetime(2014, 7, 3),
datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
datetime(2014, 7, 6, 10): datetime(2014, 7, 7)}))
normalize_cases.append((
CustomBusinessHour(-1, normalize=True, holidays=holidays),
{datetime(2014, 7, 1, 8): datetime(2014, 6, 26),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
datetime(2014, 7, 1, 10): datetime(2014, 6, 26),
datetime(2014, 7, 1, 0): datetime(2014, 6, 26),
datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
datetime(2014, 7, 6, 10): datetime(2014, 7, 4)}))
normalize_cases.append((
CustomBusinessHour(1, normalize=True,
start='17:00', end='04:00',
holidays=holidays),
{datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
datetime(2014, 7, 2, 3): datetime(2014, 7, 3),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
datetime(2014, 7, 7, 17): datetime(2014, 7, 7)}))
@pytest.mark.parametrize('norm_cases', normalize_cases)
def test_normalize(self, norm_cases):
offset, cases = norm_cases
for dt, expected in compat.iteritems(cases):
assert offset.apply(dt) == expected
def test_onOffset(self):
tests = []
tests.append((CustomBusinessHour(start='10:00', end='15:00',
holidays=self.holidays),
{datetime(2014, 7, 1, 9): False,
datetime(2014, 7, 1, 10): True,
datetime(2014, 7, 1, 15): True,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12): False,
datetime(2014, 7, 6, 12): False}))
for offset, cases in tests:
for dt, expected in compat.iteritems(cases):
assert offset.onOffset(dt) == expected
apply_cases = []
apply_cases.append((
CustomBusinessHour(holidays=holidays),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 9),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 3, 9, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 10),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
# Saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30)}))
apply_cases.append((
CustomBusinessHour(4, holidays=holidays),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 13): datetime(2014, 7, 3, 9),
datetime(2014, 7, 1, 15): datetime(2014, 7, 3, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30)}))
@pytest.mark.parametrize('apply_case', apply_cases)
def test_apply(self, apply_case):
offset, cases = apply_case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
nano_cases = []
nano_cases.append(
(CustomBusinessHour(holidays=holidays),
{Timestamp('2014-07-01 15:00') + Nano(5):
Timestamp('2014-07-01 16:00') + Nano(5),
Timestamp('2014-07-01 16:00') + Nano(5):
Timestamp('2014-07-03 09:00') + Nano(5),
Timestamp('2014-07-01 16:00') - Nano(5):
Timestamp('2014-07-01 17:00') - Nano(5)}))
nano_cases.append(
(CustomBusinessHour(-1, holidays=holidays),
{Timestamp('2014-07-01 15:00') + Nano(5):
Timestamp('2014-07-01 14:00') + Nano(5),
Timestamp('2014-07-01 10:00') + Nano(5):
Timestamp('2014-07-01 09:00') + Nano(5),
Timestamp('2014-07-01 10:00') - Nano(5):
Timestamp('2014-06-26 17:00') - Nano(5)}))
@pytest.mark.parametrize('nano_case', nano_cases)
def test_apply_nanoseconds(self, nano_case):
offset, cases = nano_case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
class TestCustomBusinessDay(Base):
_offset = CDay
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.nd = np_datetime64_compat('2008-01-01 00:00:00Z')
self.offset = CDay()
self.offset1 = self.offset
self.offset2 = CDay(2)
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` doesn't match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessDay>'
assert repr(self.offset2) == '<2 * CustomBusinessDays>'
if compat.PY37:
expected = '<BusinessDay: offset=datetime.timedelta(days=1)>'
else:
expected = '<BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_call(self):
assert self.offset2(self.d) == datetime(2008, 1, 3)
assert self.offset2(self.nd) == datetime(2008, 1, 3)
def testRollback1(self):
assert CDay(10).rollback(self.d) == self.d
def testRollback2(self):
assert (CDay(10).rollback(datetime(2008, 1, 5)) ==
datetime(2008, 1, 4))
def testRollforward1(self):
assert CDay(10).rollforward(self.d) == self.d
def testRollforward2(self):
assert (CDay(10).rollforward(datetime(2008, 1, 5)) ==
datetime(2008, 1, 7))
def test_roll_date_object(self):
offset = CDay()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 14)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 17)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [(CDay(), datetime(2008, 1, 1), True),
(CDay(), datetime(2008, 1, 5), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, d, expected = case
assert_onOffset(offset, d, expected)
apply_cases = []
apply_cases.append((CDay(), {
datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
apply_cases.append((2 * CDay(), {
datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
apply_cases.append((-CDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
apply_cases.append((-2 * CDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
apply_cases.append((CDay(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CDay(10)
assert result == datetime(2012, 11, 6)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CDay() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2011, 12, 26)
assert rs == xp
def test_apply_corner(self):
pytest.raises(Exception, CDay().apply, BMonthEnd())
def test_holidays(self):
# Define a custom business day offset that skips the holidays below
holidays = ['2012-05-01', datetime(2013, 5, 1),
np.datetime64('2014-05-01')]
tday = CDay(holidays=holidays)
for year in range(2012, 2015):
dt = datetime(year, 4, 30)
xp = datetime(year, 5, 2)
rs = dt + tday
assert rs == xp
def test_weekmask(self):
weekmask_saudi = 'Sat Sun Mon Tue Wed' # Thu-Fri Weekend
weekmask_uae = '1111001' # Fri-Sat Weekend
weekmask_egypt = [1, 1, 1, 1, 0, 0, 1] # Fri-Sat Weekend
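# weekmask accepts day-name strings, 7-character '0'/'1' strings (Mon..Sun),
# or boolean lists; the UAE and Egypt masks above spell the same Fri-Sat
# weekend in two of those formats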
bday_saudi = CDay(weekmask=weekmask_saudi)
bday_uae = CDay(weekmask=weekmask_uae)
bday_egypt = CDay(weekmask=weekmask_egypt)
dt = datetime(2013, 5, 1)
xp_saudi = datetime(2013, 5, 4)
xp_uae = datetime(2013, 5, 2)
xp_egypt = datetime(2013, 5, 2)
assert xp_saudi == dt + bday_saudi
assert xp_uae == dt + bday_uae
assert xp_egypt == dt + bday_egypt
xp2 = datetime(2013, 5, 5)
assert xp2 == dt + 2 * bday_saudi
assert xp2 == dt + 2 * bday_uae
assert xp2 == dt + 2 * bday_egypt
def test_weekmask_and_holidays(self):
weekmask_egypt = 'Sun Mon Tue Wed Thu' # Fri-Sat Weekend
holidays = ['2012-05-01', datetime(2013, 5, 1),
np.datetime64('2014-05-01')]
bday_egypt = CDay(holidays=holidays, weekmask=weekmask_egypt)
dt = datetime(2013, 4, 30)
xp_egypt = datetime(2013, 5, 5)
assert xp_egypt == dt + 2 * bday_egypt
def test_calendar(self):
calendar = USFederalHolidayCalendar()
dt = datetime(2014, 1, 17)
assert_offset_equal(CDay(calendar=calendar), dt, datetime(2014, 1, 21))
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self.offset)
_check_roundtrip(self.offset2)
_check_roundtrip(self.offset * 2)
def test_pickle_compat_0_14_1(self, datapath):
hdays = [datetime(2013, 1, 1) for _ in range(4)]
pth = datapath('tseries', 'offsets', 'data', 'cday-0.14.1.pickle')
cday0_14_1 = read_pickle(pth)
cday = CDay(holidays=hdays)
assert cday == cday0_14_1
class CustomBusinessMonthBase(object):
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = self._offset()
self.offset1 = self.offset
self.offset2 = self._offset(2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self._offset())
_check_roundtrip(self._offset(2))
_check_roundtrip(self._offset() * 2)
def test_copy(self):
# GH 17452
off = self._offset(weekmask='Mon Wed Fri')
assert off == off.copy()
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_offset = CBMonthEnd
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` doesn't match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessMonthEnd>'
assert repr(self.offset2) == '<2 * CustomBusinessMonthEnds>'
def testCall(self):
assert self.offset2(self.d) == datetime(2008, 2, 29)
def testRollback1(self):
assert (CDay(10).rollback(datetime(2007, 12, 31)) ==
datetime(2007, 12, 31))
def testRollback2(self):
assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)
def testRollforward1(self):
assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)
def test_roll_date_object(self):
offset = CBMonthEnd()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 8, 31)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 28)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [(CBMonthEnd(), datetime(2008, 1, 31), True),
(CBMonthEnd(), datetime(2008, 1, 1), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, d, expected = case
assert_onOffset(offset, d, expected)
apply_cases = []
apply_cases.append((CBMonthEnd(), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29)}))
apply_cases.append((2 * CBMonthEnd(), {
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 2, 7): datetime(2008, 3, 31)}))
apply_cases.append((-CBMonthEnd(), {
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 2, 8): datetime(2008, 1, 31)}))
apply_cases.append((-2 * CBMonthEnd(), {
datetime(2008, 1, 1): datetime(2007, 11, 30),
datetime(2008, 2, 9): datetime(2007, 12, 31)}))
apply_cases.append((CBMonthEnd(0), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthEnd(10)
assert result == datetime(2013, 7, 31)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthEnd() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 29)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 5, 31)
assert rs == xp
def test_holidays(self):
# Define a custom business month-end offset that skips the holidays below
holidays = ['2012-01-31', datetime(2012, 2, 28),
np.datetime64('2012-02-29')]
bm_offset = CBMonthEnd(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 30)
assert dt + 2 * bm_offset == datetime(2012, 2, 27)
def test_datetimeindex(self):
from pandas.tseries.holiday import USFederalHolidayCalendar
hcal = USFederalHolidayCalendar()
freq = CBMonthEnd(calendar=hcal)
assert (DatetimeIndex(start='20120101', end='20130101',
freq=freq).tolist()[0] == datetime(2012, 1, 31))
class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
_offset = CBMonthBegin
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` doesn't match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessMonthBegin>'
assert repr(self.offset2) == '<2 * CustomBusinessMonthBegins>'
def testCall(self):
assert self.offset2(self.d) == datetime(2008, 3, 3)
def testRollback1(self):
assert (CDay(10).rollback(datetime(2007, 12, 31)) ==
datetime(2007, 12, 31))
def testRollback2(self):
assert CBMonthBegin(10).rollback(self.d) == datetime(2008, 1, 1)
def testRollforward1(self):
assert CBMonthBegin(10).rollforward(self.d) == datetime(2008, 1, 1)
def test_roll_date_object(self):
offset = CBMonthBegin()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 3)
result = offset.rollforward(dt)
assert result == datetime(2012, 10, 1)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [(CBMonthBegin(), datetime(2008, 1, 1), True),
(CBMonthBegin(), datetime(2008, 1, 31), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, dt, expected = case
assert_onOffset(offset, dt, expected)
apply_cases = []
apply_cases.append((CBMonthBegin(), {
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 2, 7): datetime(2008, 3, 3)}))
apply_cases.append((2 * CBMonthBegin(), {
datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 2, 7): datetime(2008, 4, 1)}))
apply_cases.append((-CBMonthBegin(), {
datetime(2008, 1, 1): datetime(2007, 12, 3),
datetime(2008, 2, 8): datetime(2008, 2, 1)}))
apply_cases.append((-2 * CBMonthBegin(), {
datetime(2008, 1, 1): datetime(2007, 11, 1),
datetime(2008, 2, 9): datetime(2008, 1, 1)}))
apply_cases.append((CBMonthBegin(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 7): datetime(2008, 2, 1)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthBegin(10)
assert result == datetime(2013, 8, 1)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthBegin() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 1)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 6, 1)
assert rs == xp
def test_holidays(self):
# Define a custom business month-begin offset that skips the holidays below
holidays = ['2012-02-01', datetime(2012, 2, 2),
np.datetime64('2012-03-01')]
bm_offset = CBMonthBegin(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 2)
assert dt + 2 * bm_offset == datetime(2012, 2, 3)
def test_datetimeindex(self):
hcal = USFederalHolidayCalendar()
cbmb = CBMonthBegin(calendar=hcal)
assert (DatetimeIndex(start='20120101', end='20130101',
freq=cbmb).tolist()[0] == datetime(2012, 1, 3))
class TestWeek(Base):
_offset = Week
d = Timestamp(datetime(2008, 1, 2))
offset1 = _offset()
offset2 = _offset(2)
def test_repr(self):
assert repr(Week(weekday=0)) == "<Week: weekday=0>"
assert repr(Week(n=-1, weekday=0)) == "<-1 * Week: weekday=0>"
assert repr(Week(n=-2, weekday=0)) == "<-2 * Weeks: weekday=0>"
def test_corner(self):
with pytest.raises(ValueError):
Week(weekday=7)
with pytest.raises(ValueError, match="Day must be"):
Week(weekday=-1)
def test_isAnchored(self):
assert Week(weekday=0).isAnchored()
assert not Week().isAnchored()
assert not Week(2, weekday=2).isAnchored()
assert not Week(2).isAnchored()
offset_cases = []
# plain calendar week, no anchored weekday (not a business week)
offset_cases.append((Week(), {
datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
# Mon
offset_cases.append((Week(weekday=0), {
datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
# n=0 -> roll forward. Mon
offset_cases.append((Week(0, weekday=0), {
datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
# n=-2, anchored on Tuesday -> move back two Tuesdays
offset_cases.append((Week(-2, weekday=1), {
datetime(2010, 4, 6): datetime(2010, 3, 23),
datetime(2010, 4, 8): datetime(2010, 3, 30),
datetime(2010, 4, 5): datetime(2010, 3, 23)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize('weekday', range(7))
def test_onOffset(self, weekday):
offset = Week(weekday=weekday)
for day in range(1, 8):
date = datetime(2008, 1, day)
if day % 7 == weekday:
expected = True
else:
expected = False
assert_onOffset(offset, date, expected)
class TestWeekOfMonth(Base):
_offset = WeekOfMonth
offset1 = _offset()
offset2 = _offset(2)
def test_constructor(self):
with pytest.raises(ValueError, match="^Week"):
WeekOfMonth(n=1, week=4, weekday=0)
with pytest.raises(ValueError, match="^Week"):
WeekOfMonth(n=1, week=-1, weekday=0)
with pytest.raises(ValueError, match="^Day"):
WeekOfMonth(n=1, week=0, weekday=-1)
with pytest.raises(ValueError, match="^Day"):
WeekOfMonth(n=1, week=0, weekday=-7)
def test_repr(self):
assert (repr(WeekOfMonth(weekday=1, week=2)) ==
"<WeekOfMonth: week=2, weekday=1>")
def test_offset(self):
date1 = datetime(2011, 1, 4) # 1st Tuesday of Month
date2 = datetime(2011, 1, 11) # 2nd Tuesday of Month
date3 = datetime(2011, 1, 18) # 3rd Tuesday of Month
date4 = datetime(2011, 1, 25) # 4th Tuesday of Month
# tuples of (n, week, weekday, starting datetime, expected datetime)
test_cases = [
(-2, 2, 1, date1, datetime(2010, 11, 16)),
(-2, 2, 1, date2, datetime(2010, 11, 16)),
(-2, 2, 1, date3, datetime(2010, 11, 16)),
(-2, 2, 1, date4, datetime(2010, 12, 21)),
(-1, 2, 1, date1, datetime(2010, 12, 21)),
(-1, 2, 1, date2, datetime(2010, 12, 21)),
(-1, 2, 1, date3, datetime(2010, 12, 21)),
(-1, 2, 1, date4, datetime(2011, 1, 18)),
(0, 0, 1, date1, datetime(2011, 1, 4)),
(0, 0, 1, date2, datetime(2011, 2, 1)),
(0, 0, 1, date3, datetime(2011, 2, 1)),
(0, 0, 1, date4, datetime(2011, 2, 1)),
(0, 1, 1, date1, datetime(2011, 1, 11)),
(0, 1, 1, date2, datetime(2011, 1, 11)),
(0, 1, 1, date3, datetime(2011, 2, 8)),
(0, 1, 1, date4, datetime(2011, 2, 8)),
(0, 0, 1, date1, datetime(2011, 1, 4)),
(0, 1, 1, date2, datetime(2011, 1, 11)),
(0, 2, 1, date3, datetime(2011, 1, 18)),
(0, 3, 1, date4, datetime(2011, 1, 25)),
(1, 0, 0, date1, datetime(2011, 2, 7)),
(1, 0, 0, date2, datetime(2011, 2, 7)),
(1, 0, 0, date3, datetime(2011, 2, 7)),
(1, 0, 0, date4, datetime(2011, 2, 7)),
(1, 0, 1, date1, datetime(2011, 2, 1)),
(1, 0, 1, date2, datetime(2011, 2, 1)),
(1, 0, 1, date3, datetime(2011, 2, 1)),
(1, 0, 1, date4, datetime(2011, 2, 1)),
(1, 0, 2, date1, datetime(2011, 1, 5)),
(1, 0, 2, date2, datetime(2011, 2, 2)),
(1, 0, 2, date3, datetime(2011, 2, 2)),
(1, 0, 2, date4, datetime(2011, 2, 2)),
(1, 2, 1, date1, datetime(2011, 1, 18)),
(1, 2, 1, date2, datetime(2011, 1, 18)),
(1, 2, 1, date3, datetime(2011, 2, 15)),
(1, 2, 1, date4, datetime(2011, 2, 15)),
(2, 2, 1, date1, datetime(2011, 2, 15)),
(2, 2, 1, date2, datetime(2011, 2, 15)),
(2, 2, 1, date3, datetime(2011, 3, 15)),
(2, 2, 1, date4, datetime(2011, 3, 15))]
for n, week, weekday, dt, expected in test_cases:
offset = WeekOfMonth(n, week=week, weekday=weekday)
assert_offset_equal(offset, dt, expected)
# try subtracting
result = datetime(2011, 2, 1) - WeekOfMonth(week=1, weekday=2)
assert result == datetime(2011, 1, 12)
result = datetime(2011, 2, 3) - WeekOfMonth(week=0, weekday=2)
assert result == datetime(2011, 2, 2)
on_offset_cases = [(0, 0, datetime(2011, 2, 7), True),
(0, 0, datetime(2011, 2, 6), False),
(0, 0, datetime(2011, 2, 14), False),
(1, 0, datetime(2011, 2, 14), True),
(0, 1, datetime(2011, 2, 1), True),
(0, 1, datetime(2011, 2, 8), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
week, weekday, dt, expected = case
offset = WeekOfMonth(week=week, weekday=weekday)
assert offset.onOffset(dt) == expected
class TestLastWeekOfMonth(Base):
_offset = LastWeekOfMonth
offset1 = _offset()
offset2 = _offset(2)
def test_constructor(self):
with pytest.raises(ValueError, match="^N cannot be 0"):
LastWeekOfMonth(n=0, weekday=1)
with pytest.raises(ValueError, match="^Day"):
LastWeekOfMonth(n=1, weekday=-1)
with pytest.raises(ValueError, match="^Day"):
LastWeekOfMonth(n=1, weekday=7)
def test_offset(self):
# Saturday
last_sat = datetime(2013, 8, 31)
next_sat = datetime(2013, 9, 28)
offset_sat = LastWeekOfMonth(n=1, weekday=5)
one_day_before = (last_sat + timedelta(days=-1))
assert one_day_before + offset_sat == last_sat
one_day_after = (last_sat + timedelta(days=+1))
assert one_day_after + offset_sat == next_sat
# Test on that day
assert last_sat + offset_sat == next_sat
# Thursday
offset_thur = LastWeekOfMonth(n=1, weekday=3)
last_thurs = datetime(2013, 1, 31)
next_thurs = datetime(2013, 2, 28)
one_day_before = last_thurs + timedelta(days=-1)
assert one_day_before + offset_thur == last_thurs
one_day_after = last_thurs + timedelta(days=+1)
assert one_day_after + offset_thur == next_thurs
# Test on that day
assert last_thurs + offset_thur == next_thurs
three_before = last_thurs + timedelta(days=-3)
assert three_before + offset_thur == last_thurs
two_after = last_thurs + timedelta(days=+2)
assert two_after + offset_thur == next_thurs
offset_sunday = LastWeekOfMonth(n=1, weekday=WeekDay.SUN)
assert datetime(2013, 7, 31) + offset_sunday == datetime(2013, 8, 25)
on_offset_cases = [
(WeekDay.SUN, datetime(2013, 1, 27), True),
(WeekDay.SAT, datetime(2013, 3, 30), True),
(WeekDay.MON, datetime(2013, 2, 18), False), # Not the last Mon
(WeekDay.SUN, datetime(2013, 2, 25), False), # Not a SUN
(WeekDay.MON, datetime(2013, 2, 25), True),
(WeekDay.SAT, datetime(2013, 11, 30), True),
(WeekDay.SAT, datetime(2006, 8, 26), True),
(WeekDay.SAT, datetime(2007, 8, 25), True),
(WeekDay.SAT, datetime(2008, 8, 30), True),
(WeekDay.SAT, datetime(2009, 8, 29), True),
(WeekDay.SAT, datetime(2010, 8, 28), True),
(WeekDay.SAT, datetime(2011, 8, 27), True),
(WeekDay.SAT, datetime(2019, 8, 31), True)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
weekday, dt, expected = case
offset = LastWeekOfMonth(weekday=weekday)
assert offset.onOffset(dt) == expected
class TestSemiMonthEnd(Base):
_offset = SemiMonthEnd
offset1 = _offset()
offset2 = _offset(2)
def test_offset_whole_year(self):
dates = (datetime(2007, 12, 31),
datetime(2008, 1, 15),
datetime(2008, 1, 31),
datetime(2008, 2, 15),
datetime(2008, 2, 29),
datetime(2008, 3, 15),
datetime(2008, 3, 31),
datetime(2008, 4, 15),
datetime(2008, 4, 30),
datetime(2008, 5, 15),
datetime(2008, 5, 31),
datetime(2008, 6, 15),
datetime(2008, 6, 30),
datetime(2008, 7, 15),
datetime(2008, 7, 31),
datetime(2008, 8, 15),
datetime(2008, 8, 31),
datetime(2008, 9, 15),
datetime(2008, 9, 30),
datetime(2008, 10, 15),
datetime(2008, 10, 31),
datetime(2008, 11, 15),
datetime(2008, 11, 30),
datetime(2008, 12, 15),
datetime(2008, 12, 31))
for base, exp_date in zip(dates[:-1], dates[1:]):
assert_offset_equal(SemiMonthEnd(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
result = SemiMonthEnd().apply_index(s)
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = DatetimeIndex(start=dates[0], end=dates[-1], freq='SM')
exp = DatetimeIndex(dates)
tm.assert_index_equal(result, exp)
offset_cases = []
offset_cases.append((SemiMonthEnd(), {
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2006, 12, 31)}))
offset_cases.append((SemiMonthEnd(day_of_month=20), {
datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 20),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 20),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20)}))
offset_cases.append((SemiMonthEnd(0), {
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 16): datetime(2008, 1, 31),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 15)}))
offset_cases.append((SemiMonthEnd(0, day_of_month=16), {
datetime(2008, 1, 1): datetime(2008, 1, 16),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 16)}))
offset_cases.append((SemiMonthEnd(2), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 11, 30)}))
offset_cases.append((SemiMonthEnd(-1), {
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 15),
datetime(2008, 12, 31): datetime(2008, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 15),
datetime(2006, 12, 30): datetime(2006, 12, 15),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
offset_cases.append((SemiMonthEnd(-1, day_of_month=4), {
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2007, 1, 4): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 4),
datetime(2008, 12, 31): datetime(2008, 12, 4),
datetime(2006, 12, 5): datetime(2006, 12, 4),
datetime(2006, 12, 30): datetime(2006, 12, 4),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
offset_cases.append((SemiMonthEnd(-2), {
datetime(2007, 1, 1): datetime(2006, 12, 15),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 3, 15): datetime(2008, 2, 15),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 14): datetime(2006, 11, 15),
datetime(2007, 1, 1): datetime(2006, 12, 15)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize('case', offset_cases)
def test_apply_index(self, case):
offset, cases = case
s = DatetimeIndex(cases.keys())
result = offset.apply_index(s)
exp = DatetimeIndex(cases.values())
tm.assert_index_equal(result, exp)
on_offset_cases = [(datetime(2007, 12, 31), True),
(datetime(2007, 12, 15), True),
(datetime(2007, 12, 14), False),
(datetime(2007, 12, 1), False),
(datetime(2008, 2, 29), True)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
dt, expected = case
assert_onOffset(SemiMonthEnd(), dt, expected)
@pytest.mark.parametrize('klass,assert_func',
[(Series, tm.assert_series_equal),
(DatetimeIndex, tm.assert_index_equal)])
def test_vectorized_offset_addition(self, klass, assert_func):
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
Timestamp('2000-02-29', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'),
Timestamp('2000-02-01', tz='US/Central')], name='a')
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
class TestSemiMonthBegin(Base):
_offset = SemiMonthBegin
offset1 = _offset()
offset2 = _offset(2)
def test_offset_whole_year(self):
dates = (datetime(2007, 12, 15),
datetime(2008, 1, 1),
datetime(2008, 1, 15),
datetime(2008, 2, 1),
datetime(2008, 2, 15),
datetime(2008, 3, 1),
datetime(2008, 3, 15),
datetime(2008, 4, 1),
datetime(2008, 4, 15),
datetime(2008, 5, 1),
datetime(2008, 5, 15),
datetime(2008, 6, 1),
datetime(2008, 6, 15),
datetime(2008, 7, 1),
datetime(2008, 7, 15),
datetime(2008, 8, 1),
datetime(2008, 8, 15),
datetime(2008, 9, 1),
datetime(2008, 9, 15),
datetime(2008, 10, 1),
datetime(2008, 10, 15),
datetime(2008, 11, 1),
datetime(2008, 11, 15),
datetime(2008, 12, 1),
datetime(2008, 12, 15))
for base, exp_date in zip(dates[:-1], dates[1:]):
assert_offset_equal(SemiMonthBegin(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
result = SemiMonthBegin().apply_index(s)
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = DatetimeIndex(start=dates[0], end=dates[-1], freq='SMS')
exp = DatetimeIndex(dates)
tm.assert_index_equal(result, exp)
offset_cases = []
offset_cases.append((SemiMonthBegin(), {
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2007, 1, 1)}))
offset_cases.append((SemiMonthBegin(day_of_month=20), {
datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20)}))
offset_cases.append((SemiMonthBegin(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 16): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 2): datetime(2006, 12, 15),
datetime(2007, 1, 1): datetime(2007, 1, 1)}))
offset_cases.append((SemiMonthBegin(0, day_of_month=16), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 5): datetime(2007, 1, 16),
datetime(2007, 1, 1): datetime(2007, 1, 1)}))
offset_cases.append((SemiMonthBegin(2), {
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 1): datetime(2007, 1, 1),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 15): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 2, 1),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 12, 1)}))
offset_cases.append((SemiMonthBegin(-1), {
datetime(2007, 1, 1): datetime(2006, 12, 15),
datetime(2008, 6, 30): datetime(2008, 6, 15),
datetime(2008, 6, 14): datetime(2008, 6, 1),
datetime(2008, 12, 31): datetime(2008, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 15)}))
offset_cases.append((SemiMonthBegin(-1, day_of_month=4), {
datetime(2007, 1, 1): datetime(2006, 12, 4),
datetime(2007, 1, 4): datetime(2007, 1, 1),
datetime(2008, 6, 30): datetime(2008, 6, 4),
datetime(2008, 12, 31): datetime(2008, 12, 4),
datetime(2006, 12, 5): datetime(2006, 12, 4),
datetime(2006, 12, 30): datetime(2006, 12, 4),
datetime(2006, 12, 2): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 4)}))
offset_cases.append((SemiMonthBegin(-2), {
datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 6, 30): datetime(2008, 6, 1),
datetime(2008, 6, 14): datetime(2008, 5, 15),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
datetime(2006, 12, 15): datetime(2006, 11, 15),
datetime(2007, 1, 1): datetime(2006, 12, 1)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize('case', offset_cases)
def test_apply_index(self, case):
offset, cases = case
s = DatetimeIndex(cases.keys())
result = offset.apply_index(s)
exp = DatetimeIndex(cases.values())
tm.assert_index_equal(result, exp)
on_offset_cases = [(datetime(2007, 12, 1), True),
(datetime(2007, 12, 15), True),
(datetime(2007, 12, 14), False),
(datetime(2007, 12, 31), False),
(datetime(2008, 2, 15), True)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
dt, expected = case
assert_onOffset(SemiMonthBegin(), dt, expected)
@pytest.mark.parametrize('klass,assert_func',
[(Series, tm.assert_series_equal),
(DatetimeIndex, tm.assert_index_equal)])
def test_vectorized_offset_addition(self, klass, assert_func):
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + SemiMonthBegin()
result2 = SemiMonthBegin() + s
exp = klass([Timestamp('2000-02-01 00:15:00', tz='US/Central'),
Timestamp('2000-03-01', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'),
Timestamp('2000-02-01', tz='US/Central')], name='a')
result = s + SemiMonthBegin()
result2 = SemiMonthBegin() + s
exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
def test_Easter():
assert_offset_equal(Easter(), datetime(2010, 1, 1), datetime(2010, 4, 4))
assert_offset_equal(Easter(), datetime(2010, 4, 5), datetime(2011, 4, 24))
assert_offset_equal(Easter(2), datetime(2010, 1, 1), datetime(2011, 4, 24))
assert_offset_equal(Easter(), datetime(2010, 4, 4), datetime(2011, 4, 24))
assert_offset_equal(Easter(2), datetime(2010, 4, 4), datetime(2012, 4, 8))
assert_offset_equal(-Easter(), datetime(2011, 1, 1), datetime(2010, 4, 4))
assert_offset_equal(-Easter(), datetime(2010, 4, 5), datetime(2010, 4, 4))
assert_offset_equal(-Easter(2),
datetime(2011, 1, 1),
datetime(2009, 4, 12))
assert_offset_equal(-Easter(), datetime(2010, 4, 4), datetime(2009, 4, 12))
assert_offset_equal(-Easter(2),
datetime(2010, 4, 4),
datetime(2008, 3, 23))
class TestOffsetNames(object):
def test_get_offset_name(self):
assert BDay().freqstr == 'B'
assert BDay(2).freqstr == '2B'
assert BMonthEnd().freqstr == 'BM'
assert Week(weekday=0).freqstr == 'W-MON'
assert Week(weekday=1).freqstr == 'W-TUE'
assert Week(weekday=2).freqstr == 'W-WED'
assert Week(weekday=3).freqstr == 'W-THU'
assert Week(weekday=4).freqstr == 'W-FRI'
assert LastWeekOfMonth(weekday=WeekDay.SUN).freqstr == "LWOM-SUN"
def test_get_offset():
with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
get_offset('gibberish')
with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
get_offset('QS-JAN-B')
pairs = [
('B', BDay()), ('b', BDay()), ('bm', BMonthEnd()),
('Bm', BMonthEnd()), ('W-MON', Week(weekday=0)),
('W-TUE', Week(weekday=1)), ('W-WED', Week(weekday=2)),
('W-THU', Week(weekday=3)), ('W-FRI', Week(weekday=4))]
for name, expected in pairs:
offset = get_offset(name)
assert offset == expected, ("Expected %r to yield %r (actual: %r)" %
(name, expected, offset))
def test_get_offset_legacy():
pairs = [('w@Sat', Week(weekday=5))]
for name, expected in pairs:
with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
get_offset(name)
class TestOffsetAliases(object):
def setup_method(self, method):
_offset_map.clear()
def test_alias_equality(self):
for k, v in compat.iteritems(_offset_map):
if v is None:
continue
assert k == v.copy()
def test_rule_code(self):
lst = ['M', 'MS', 'BM', 'BMS', 'D', 'B', 'H', 'T', 'S', 'L', 'U']
for k in lst:
assert k == get_offset(k).rule_code
# should be cached - this is kind of an internals test...
assert k in _offset_map
assert k == (get_offset(k) * 3).rule_code
suffix_lst = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
base = 'W'
for v in suffix_lst:
alias = '-'.join([base, v])
assert alias == get_offset(alias).rule_code
assert alias == (get_offset(alias) * 5).rule_code
suffix_lst = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG',
'SEP', 'OCT', 'NOV', 'DEC']
base_lst = ['A', 'AS', 'BA', 'BAS', 'Q', 'QS', 'BQ', 'BQS']
for base in base_lst:
for v in suffix_lst:
alias = '-'.join([base, v])
assert alias == get_offset(alias).rule_code
assert alias == (get_offset(alias) * 5).rule_code
lst = ['M', 'D', 'B', 'H', 'T', 'S', 'L', 'U']
for k in lst:
code, stride = get_freq_code('3' + k)
assert isinstance(code, int)
assert stride == 3
assert k == get_freq_str(code)
def test_dateoffset_misc():
oset = offsets.DateOffset(months=2, days=4)
# it works
oset.freqstr
assert (not offsets.DateOffset(months=2) == 2)
def test_freq_offsets():
off = BDay(1, offset=timedelta(0, 1800))
assert (off.freqstr == 'B+30Min')
off = BDay(1, offset=timedelta(0, -1800))
assert (off.freqstr == 'B-30Min')
def get_all_subclasses(cls):
ret = set()
this_subclasses = cls.__subclasses__()
ret = ret | set(this_subclasses)
for this_subclass in this_subclasses:
ret = ret | get_all_subclasses(this_subclass)  # accumulate nested subclasses
return ret
class TestCaching(object):
# as of GH 6479 (in 0.14.0), offset caching is turned off
# as of v0.12.0 only BusinessMonth/Quarter were actually caching
def setup_method(self, method):
_daterange_cache.clear()
_offset_map.clear()
def run_X_index_creation(self, cls):
inst1 = cls()
if not inst1.isAnchored():
assert not inst1._should_cache(), cls
return
assert inst1._should_cache(), cls
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 31),
freq=inst1, normalize=True)
assert cls() in _daterange_cache, cls
def test_should_cache_month_end(self):
assert not MonthEnd()._should_cache()
def test_should_cache_bmonth_end(self):
assert not BusinessMonthEnd()._should_cache()
def test_should_cache_week_month(self):
assert not WeekOfMonth(weekday=1, week=2)._should_cache()
def test_all_cacheableoffsets(self):
for subclass in get_all_subclasses(CacheableOffset):
if subclass.__name__[0] == "_" \
or subclass in TestCaching.no_simple_ctr:
continue
self.run_X_index_creation(subclass)
def test_month_end_index_creation(self):
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 31),
freq=MonthEnd(), normalize=True)
assert not MonthEnd() in _daterange_cache
def test_bmonth_end_index_creation(self):
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 29),
freq=BusinessMonthEnd(), normalize=True)
assert not BusinessMonthEnd() in _daterange_cache
def test_week_of_month_index_creation(self):
inst1 = WeekOfMonth(weekday=1, week=2)
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 29),
freq=inst1, normalize=True)
inst2 = WeekOfMonth(weekday=1, week=2)
assert inst2 not in _daterange_cache
class TestReprNames(object):
def test_str_for_named_is_name(self):
# look at all the amazing combinations!
month_prefixes = ['A', 'AS', 'BA', 'BAS', 'Q', 'BQ', 'BQS', 'QS']
names = [prefix + '-' + month
for prefix in month_prefixes
for month in ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL',
'AUG', 'SEP', 'OCT', 'NOV', 'DEC']]
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
names += ['W-' + day for day in days]
names += ['WOM-' + week + day
for week in ('1', '2', '3', '4') for day in days]
_offset_map.clear()
for name in names:
offset = get_offset(name)
assert offset.freqstr == name
def get_utc_offset_hours(ts):
# take a Timestamp and compute total hours of utc offset
o = ts.utcoffset()
return (o.days * 24 * 3600 + o.seconds) / 3600.0
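# For example (illustrative): a Timestamp pinned to UTC-5 has
# utcoffset() == timedelta(hours=-5), i.e. days=-1, seconds=68400,
# so (-1 * 24 * 3600 + 68400) / 3600.0 == -5.0.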
class TestDST(object):
"""
test DateOffset additions over Daylight Savings Time
"""
# one microsecond before the DST transition
ts_pre_fallback = "2013-11-03 01:59:59.999999"
ts_pre_springfwd = "2013-03-10 01:59:59.999999"
# test both basic names and dateutil timezones
timezone_utc_offsets = {
'US/Eastern': dict(utc_offset_daylight=-4,
utc_offset_standard=-5, ),
'dateutil/US/Pacific': dict(utc_offset_daylight=-7,
utc_offset_standard=-8, )
}
valid_date_offsets_singular = [
'weekday', 'day', 'hour', 'minute', 'second', 'microsecond'
]
valid_date_offsets_plural = [
'weeks', 'days',
'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds'
]
def _test_all_offsets(self, n, **kwds):
valid_offsets = self.valid_date_offsets_plural if n > 1 \
else self.valid_date_offsets_singular
for name in valid_offsets:
self._test_offset(offset_name=name, offset_n=n, **kwds)
def _test_offset(self, offset_name, offset_n, tstart, expected_utc_offset):
offset = DateOffset(**{offset_name: offset_n})
t = tstart + offset
if expected_utc_offset is not None:
assert get_utc_offset_hours(t) == expected_utc_offset
if offset_name == 'weeks':
# dates should match
assert t.date() == timedelta(days=7 * offset.kwds[
'weeks']) + tstart.date()
# expect the same day of week, hour of day, minute, second, ...
assert (t.dayofweek == tstart.dayofweek and
t.hour == tstart.hour and
t.minute == tstart.minute and
t.second == tstart.second)
elif offset_name == 'days':
# dates should match
assert timedelta(offset.kwds['days']) + tstart.date() == t.date()
# expect the same hour of day, minute, second, ...
assert (t.hour == tstart.hour and
t.minute == tstart.minute and
t.second == tstart.second)
elif offset_name in self.valid_date_offsets_singular:
# expect the singular offset value to match between tstart and t
datepart_offset = getattr(t, offset_name
if offset_name != 'weekday' else
'dayofweek')
assert datepart_offset == offset.kwds[offset_name]
else:
# the offset should be the same as if it was done in UTC
assert (t == (tstart.tz_convert('UTC') + offset)
.tz_convert('US/Pacific'))
def _make_timestamp(self, string, hrs_offset, tz):
if hrs_offset >= 0:
offset_string = '{hrs:02d}00'.format(hrs=hrs_offset)
else:
offset_string = '-{hrs:02d}00'.format(hrs=-1 * hrs_offset)
return Timestamp(string + offset_string).tz_convert(tz)
def test_fallback_plural(self):
# test moving from daylight savings to standard time
import dateutil
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_daylight']
hrs_post = utc_offsets['utc_offset_standard']
if LooseVersion(dateutil.__version__) < LooseVersion('2.6.0'):
# buggy ambiguous behavior in 2.6.0
# GH 14621
# https://github.com/dateutil/dateutil/issues/321
self._test_all_offsets(
n=3, tstart=self._make_timestamp(self.ts_pre_fallback,
hrs_pre, tz),
expected_utc_offset=hrs_post)
elif LooseVersion(dateutil.__version__) > LooseVersion('2.6.0'):
# fixed, but skip the test
continue
def test_springforward_plural(self):
# test moving from standard to daylight savings
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
hrs_post = utc_offsets['utc_offset_daylight']
self._test_all_offsets(
n=3, tstart=self._make_timestamp(self.ts_pre_springfwd,
hrs_pre, tz),
expected_utc_offset=hrs_post)
def test_fallback_singular(self):
# in the case of singular offsets, we don't necessarily know which utc
# offset the new Timestamp will wind up in (the tz for 1 month may be
# different from 1 second) so we don't specify an expected_utc_offset
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
self._test_all_offsets(n=1, tstart=self._make_timestamp(
self.ts_pre_fallback, hrs_pre, tz), expected_utc_offset=None)
def test_springforward_singular(self):
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
self._test_all_offsets(n=1, tstart=self._make_timestamp(
self.ts_pre_springfwd, hrs_pre, tz), expected_utc_offset=None)
offset_classes = {MonthBegin: ['11/2/2012', '12/1/2012'],
MonthEnd: ['11/2/2012', '11/30/2012'],
BMonthBegin: ['11/2/2012', '12/3/2012'],
BMonthEnd: ['11/2/2012', '11/30/2012'],
CBMonthBegin: ['11/2/2012', '12/3/2012'],
CBMonthEnd: ['11/2/2012', '11/30/2012'],
SemiMonthBegin: ['11/2/2012', '11/15/2012'],
SemiMonthEnd: ['11/2/2012', '11/15/2012'],
Week: ['11/2/2012', '11/9/2012'],
YearBegin: ['11/2/2012', '1/1/2013'],
YearEnd: ['11/2/2012', '12/31/2012'],
BYearBegin: ['11/2/2012', '1/1/2013'],
BYearEnd: ['11/2/2012', '12/31/2012'],
QuarterBegin: ['11/2/2012', '12/1/2012'],
QuarterEnd: ['11/2/2012', '12/31/2012'],
BQuarterBegin: ['11/2/2012', '12/3/2012'],
BQuarterEnd: ['11/2/2012', '12/31/2012'],
Day: ['11/4/2012', '11/4/2012 23:00']}.items()
@pytest.mark.parametrize('tup', offset_classes)
def test_all_offset_classes(self, tup):
offset, test_values = tup
first = Timestamp(test_values[0], tz='US/Eastern') + offset()
second = Timestamp(test_values[1], tz='US/Eastern')
assert first == second
# ---------------------------------------------------------------------
def test_get_offset_day_error():
# subclass of _BaseOffset must override _day_opt attribute, or we should
# get a NotImplementedError
with pytest.raises(NotImplementedError):
DateOffset()._get_offset_day(datetime.now())
def test_valid_default_arguments(offset_types):
# GH#19142 check that calling the constructors without passing
# any keyword arguments produces valid offsets
cls = offset_types
cls()
@pytest.mark.parametrize('kwd', sorted(list(liboffsets.relativedelta_kwds)))
def test_valid_month_attributes(kwd, month_classes):
# GH#18226
cls = month_classes
# check that we cannot create e.g. MonthEnd(weeks=3)
with pytest.raises(TypeError):
cls(**{kwd: 3})
@pytest.mark.parametrize('kwd', sorted(list(liboffsets.relativedelta_kwds)))
def test_valid_tick_attributes(kwd, tick_classes):
# GH#18226
cls = tick_classes
# check that we cannot create e.g. Hour(weeks=3)
with pytest.raises(TypeError):
cls(**{kwd: 3})
def test_validate_n_error():
with pytest.raises(TypeError):
DateOffset(n='Doh!')
with pytest.raises(TypeError):
MonthBegin(n=timedelta(1))
with pytest.raises(TypeError):
BDay(n=np.array([1, 2], dtype=np.int64))
def test_require_integers(offset_types):
cls = offset_types
with pytest.raises(ValueError):
cls(n=1.5)
def test_tick_normalize_raises(tick_classes):
# check that trying to create a Tick object with normalize=True raises
# GH#21427
cls = tick_classes
with pytest.raises(ValueError):
cls(n=3, normalize=True)
def test_weeks_onoffset():
# GH#18510 Week with weekday = None, normalize = False should always
# be onOffset
offset = Week(n=2, weekday=None)
ts = Timestamp('1862-01-13 09:03:34.873477378+0210', tz='Africa/Lusaka')
fast = offset.onOffset(ts)
slow = (ts + offset) - offset == ts
assert fast == slow
# negative n
offset = Week(n=2, weekday=None)
ts = Timestamp('1856-10-24 16:18:36.556360110-0717', tz='Pacific/Easter')
fast = offset.onOffset(ts)
slow = (ts + offset) - offset == ts
assert fast == slow
def test_weekofmonth_onoffset():
# GH#18864
# Make sure that nanoseconds don't trip up onOffset (and with it apply)
offset = WeekOfMonth(n=2, week=2, weekday=0)
ts = Timestamp('1916-05-15 01:14:49.583410462+0422', tz='Asia/Qyzylorda')
fast = offset.onOffset(ts)
slow = (ts + offset) - offset == ts
assert fast == slow
# negative n
offset = WeekOfMonth(n=-3, week=1, weekday=0)
ts = Timestamp('1980-12-08 03:38:52.878321185+0500', tz='Asia/Oral')
fast = offset.onOffset(ts)
slow = (ts + offset) - offset == ts
assert fast == slow
def test_last_week_of_month_on_offset():
# GH#19036, GH#18977 _adjust_dst was incorrect for LastWeekOfMonth
offset = LastWeekOfMonth(n=4, weekday=6)
ts = Timestamp('1917-05-27 20:55:27.084284178+0200',
tz='Europe/Warsaw')
slow = (ts + offset) - offset == ts
fast = offset.onOffset(ts)
assert fast == slow
# negative n
offset = LastWeekOfMonth(n=-4, weekday=5)
ts = Timestamp('2005-08-27 05:01:42.799392561-0500',
tz='America/Rainy_River')
slow = (ts + offset) - offset == ts
fast = offset.onOffset(ts)
assert fast == slow
| bsd-3-clause |
ebernhardson/l2r | code/data_augment_es_docs.py | 1 | 1880 | import pandas as pd
import requests
import grequests
import json
import progressbar
import config
from utils import np_utils, table_utils
def exception_handler(req, e):
raise e
def main():
# returns an ndarray
page_ids = table_utils._read(config.CLICK_DATA)['hit_page_id'].unique()
url = config.ES_URL + '/page/_mget'
params = {'fields': ','.join(config.ES_DOC_FIELDS)}
defaults = config.ES_DOC_FIELDS_DEFAULTS
multi_value_fields = [k for k, v in defaults.iteritems() if isinstance(v, tuple)]
docs = table_utils._open_shelve_write(config.ES_PAGE_DOCS_SHELVE)
i = 0
try:
with progressbar.ProgressBar(max_value=len(page_ids)) as bar:
for top_batch in np_utils._split(page_ids, 10000):
session = requests.Session()
reqs = []
for batch in np_utils._split(top_batch, 100):
data = json.dumps({'ids': list(batch)})
reqs.append(grequests.post(url, data=data, params=params, session=session))
for r in grequests.imap(reqs, size=20, exception_handler=exception_handler):
found = json.loads(r.text)['docs']
for d in found:
if not d['found']:
continue
res = defaults.copy()
for field, v in d['fields'].iteritems():
# ES always returns a list, even if there is only one item.
# Flatten single-valued fields down to scalars.
res[field] = tuple(v) if field in multi_value_fields else v[0]
docs[str(d['_id'])] = res
i += len(found)
bar.update(i)
docs.sync()
finally:
docs.close()
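# Illustrative note (not from the original script): each shelve entry ends up
# mapping a page id string to a dict keyed by the names in
# config.ES_DOC_FIELDS, with tuples for the multi-valued fields, e.g.
#   docs['12345'] = {'some_field': 'value', 'some_multi_field': ('a', 'b')}
# The field names above are hypothetical; the real ones come from config.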
if __name__ == "__main__":
main()
| mit |
gdementen/larray | larray/inout/hdf.py | 2 | 6860 | from __future__ import absolute_import, print_function
import warnings
import numpy as np
from pandas import HDFStore
from larray.core.array import Array
from larray.core.axis import Axis
from larray.core.constants import nan
from larray.core.group import Group, LGroup, _translate_group_key_hdf
from larray.core.metadata import Metadata
from larray.util.misc import LHDFStore
from larray.inout.session import register_file_handler
from larray.inout.common import FileHandler
from larray.inout.pandas import df_asarray
from larray.example import get_example_filepath
def read_hdf(filepath_or_buffer, key, fill_value=nan, na=nan, sort_rows=False, sort_columns=False,
name=None, **kwargs):
r"""Reads an axis or group or array named key from a HDF5 file in filepath (path+name)
Parameters
----------
filepath_or_buffer : str or pandas.HDFStore
Path and name where the HDF5 file is stored or a HDFStore object.
key : str or Group
Name of the array.
fill_value : scalar or Array, optional
Value used to fill cells corresponding to label combinations which are not present in the input.
Defaults to NaN.
sort_rows : bool, optional
Whether or not to sort the rows alphabetically.
Must be False if the read array has been dumped with an larray version >= 0.30.
Defaults to False.
sort_columns : bool, optional
Whether or not to sort the columns alphabetically.
Must be False if the read array has been dumped with an larray version >= 0.30.
Defaults to False.
name : str, optional
Name of the axis or group to return. If None, name is set to passed key.
Defaults to None.
Returns
-------
Array
Examples
--------
>>> fname = get_example_filepath('examples.h5')
Read array by passing its identifier (key) inside the HDF file
>>> # The data below is derived from a subset of the demo_pjan table from Eurostat
>>> read_hdf(fname, 'pop') # doctest: +SKIP
country gender\time 2013 2014 2015
Belgium Male 5472856 5493792 5524068
Belgium Female 5665118 5687048 5713206
France Male 31772665 32045129 32174258
France Female 33827685 34120851 34283895
Germany Male 39380976 39556923 39835457
Germany Female 41142770 41210540 41362080
"""
if not np.isnan(na):
fill_value = na
warnings.warn("read_hdf `na` argument has been renamed to `fill_value`. Please use that instead.",
FutureWarning, stacklevel=2)
key = _translate_group_key_hdf(key)
res = None
with LHDFStore(filepath_or_buffer) as store:
pd_obj = store.get(key)
attrs = store.get_storer(key).attrs
writer = attrs.writer if 'writer' in attrs else None
# for backward compatibility but any object read from an hdf file should have an attribute 'type'
_type = attrs.type if 'type' in attrs else 'Array'
_meta = attrs.metadata if 'metadata' in attrs else None
if _type == 'Array':
# cartesian product is not necessary if the array was written by LArray
cartesian_prod = writer != 'LArray'
res = df_asarray(pd_obj, sort_rows=sort_rows, sort_columns=sort_columns, fill_value=fill_value,
parse_header=False, cartesian_prod=cartesian_prod)
if _meta is not None:
res.meta = _meta
elif _type == 'Axis':
if name is None:
name = str(pd_obj.name)
if name == 'None':
name = None
labels = pd_obj.values
if 'dtype_kind' in attrs and attrs['dtype_kind'] == 'U':
# this check is there because there are cases where dtype_kind is 'U' but pandas returns
# an array with object dtype containing bytes instead of a string array, and in that case
# np.char.decode does not work
# this is at least the case for Python2 + Pandas 0.24.2 combination
if labels.dtype.kind == 'O':
labels = np.array([l.decode('utf-8') for l in labels], dtype='U')
else:
labels = np.char.decode(labels, 'utf-8')
res = Axis(labels=labels, name=name)
res._iswildcard = attrs['wildcard']
elif _type == 'Group':
if name is None:
name = str(pd_obj.name)
if name == 'None':
name = None
key = pd_obj.values
if 'dtype_kind' in attrs and attrs['dtype_kind'] == 'U':
key = np.char.decode(key, 'utf-8')
axis = read_hdf(filepath_or_buffer, attrs['axis_key'])
res = LGroup(key=key, name=name, axis=axis)
return res
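# Usage sketch (not part of the original module): axes and groups written by
# the PandasHDFHandler below are stored under the '__axes__/' and
# '__groups__/' prefixes, so they can also be read back directly, e.g.
#   age = read_hdf('examples.h5', '__axes__/age')               # -> Axis
#   children = read_hdf('examples.h5', '__groups__/children')   # -> LGroup
# The key names 'age' and 'children' are hypothetical.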
@register_file_handler('pandas_hdf', ['h5', 'hdf'])
class PandasHDFHandler(FileHandler):
r"""
Handler for HDF5 files using Pandas.
"""
def _open_for_read(self):
self.handle = HDFStore(self.fname, mode='r')
def _open_for_write(self):
self.handle = HDFStore(self.fname)
def list_items(self):
keys = [key.strip('/') for key in self.handle.keys()]
# axes
items = [(key.split('/')[-1], 'Axis') for key in keys if '__axes__' in key]
# groups
items += [(key.split('/')[-1], 'Group') for key in keys if '__groups__' in key]
# arrays
items += [(key, 'Array') for key in keys if '/' not in key]
return items
def _read_item(self, key, type, *args, **kwargs):
if type == 'Array':
hdf_key = '/' + key
elif type == 'Axis':
hdf_key = '__axes__/' + key
elif type == 'Group':
hdf_key = '__groups__/' + key
else:
raise TypeError()
return read_hdf(self.handle, hdf_key, *args, **kwargs)
def _dump_item(self, key, value, *args, **kwargs):
if isinstance(value, Array):
hdf_key = '/' + key
value.to_hdf(self.handle, hdf_key, *args, **kwargs)
elif isinstance(value, Axis):
hdf_key = '__axes__/' + key
value.to_hdf(self.handle, hdf_key, *args, **kwargs)
elif isinstance(value, Group):
hdf_key = '__groups__/' + key
hdf_axis_key = '__axes__/' + value.axis.name
value.to_hdf(self.handle, hdf_key, hdf_axis_key, *args, **kwargs)
else:
raise TypeError()
def _read_metadata(self):
metadata = Metadata.from_hdf(self.handle)
if metadata is None:
metadata = Metadata()
return metadata
def _dump_metadata(self, metadata):
metadata.to_hdf(self.handle)
def close(self):
self.handle.close()
| gpl-3.0 |
EPFL-LCSB/pytfa | pytfa/analysis/manipulation.py | 1 | 2376 | from ..core.model import Solution
import pandas as pd
def apply_reaction_variability(tmodel, va, inplace = True):
"""
Applies the VA results as bounds for the reactions of a cobra_model
:param inplace:
:param tmodel:
:param va:
:return:
"""
if inplace:
_tmodel = tmodel
else:
_tmodel = tmodel.copy()
for this_reaction in _tmodel.reactions:
if this_reaction.id not in va.index:
continue
this_reaction.lower_bound = va.loc[this_reaction.id,'minimum']
this_reaction.upper_bound = va.loc[this_reaction.id,'maximum']
return _tmodel
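# Minimal usage sketch (assumed names, not from the original module): `va`
# only needs to be a DataFrame indexed by reaction id with 'minimum' and
# 'maximum' columns; reactions missing from va.index keep their bounds.
#   import pandas as pd
#   va = pd.DataFrame({'minimum': [0.0], 'maximum': [5.0]}, index=['PGI'])
#   constrained = apply_reaction_variability(tmodel, va, inplace=False)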
def apply_generic_variability(tmodel,va, inplace = True):
"""
Reactions are dealt with through cobra, but the other added variables use
pytfa's interface (the GenericVariable class), so we apply their
variability bounds directly in the solver.
:param tmodel:
:param va:
:param inplace:
:return:
"""
if inplace:
_tmodel = tmodel
else:
_tmodel = tmodel.copy()
for varname in va.index:
the_min,the_max = va.loc[varname,['minimum','maximum']]
_tmodel._var_dict[varname].variable.lb = the_min
_tmodel._var_dict[varname].variable.ub = the_max
return _tmodel
def apply_directionality(tmodel, solution, inplace = True):
"""
Takes a flux solution and transfers its reaction directionality as
constraints for the cobra_model
:param inplace:
:param tmodel:
:param solution:
:return:
"""
if inplace:
_tmodel = tmodel
else:
_tmodel = tmodel.copy()
if isinstance(solution, Solution):
sol = solution.raw
elif isinstance(solution, pd.Series) or isinstance(solution, pd.DataFrame):
sol = solution
else:
raise TypeError('solution object should be of class Solution or pandas.Series')
for this_reaction in _tmodel.reactions:
backward_use = _tmodel.backward_use_variable.get_by_id(this_reaction.id)
forward_use = _tmodel.forward_use_variable.get_by_id(this_reaction.id)
backward_use.variable.lb = round(sol[backward_use.name])
backward_use.variable.ub = round(sol[backward_use.name])
forward_use.variable.lb = round(sol[forward_use.name])
forward_use.variable.ub = round(sol[forward_use.name])
return _tmodel
| apache-2.0 |
jtwhite79/pyemu | pyemu/pst/pst_controldata.py | 1 | 18195 | """This module contains several class definitions for obscure parts of the
PEST control file: `ControlData` ('* control data'), `RegData` ('* regularization')
and `SvdData` ('* singular value decomposition'). These
classes are automatically created and appended to `Pst` instances;
users shouldn't need to deal with these classes explicitly
"""
from __future__ import print_function, division
import os
import copy
import warnings
import numpy as np
import pandas
from ..pyemu_warnings import PyemuWarning
pandas.options.display.max_colwidth = 100
# from pyemu.pst.pst_handler import SFMT,SFMT_LONG,FFMT,IFMT
# formatters
SFMT = lambda x: "{0:>20s}".format(str(x))
"""lambda function for string formatting `str` types into 20-char widths"""
SFMT_LONG = lambda x: "{0:>50s}".format(str(x))
"""lambda function for string formatting `str` types into 50-char widths"""
IFMT = lambda x: "{0:>10d}".format(int(x))
"""lambda function for string formatting `int` types into 10-char widths"""
FFMT = lambda x: "{0:>15.6E}".format(float(x))
"""lambda function for string formatting `float` types into 15-char widths"""
CONTROL_DEFAULT_LINES = """restart estimation
0 0 0 0 0 0
0 0 single point 1 0 0 noobsreref
2.000000e+001 -3.000000e+000 3.000000e-001 1.000000e-002 -7 999 lamforgive noderforgive
1.000000e+001 1.000000e+001 1.000000e-003 0 0
1.000000e-001 1 1.1 noaui nosenreuse noboundscale
30 1.000000e-002 3 3 1.000000e-002 3 0.0 1 -1.0
0 0 0 0 jcosave verboserec jcosaveitn reisaveitn parsaveitn noparsaverun""".lower().split(
"\n"
)
CONTROL_VARIABLE_LINES = """RSTFLE PESTMODE
NPAR NOBS NPARGP NPRIOR NOBSGP [MAXCOMPDIM]
NTPLFLE NINSFLE PRECIS DPOINT [NUMCOM] [JACFILE] [MESSFILE] [OBSREREF]
RLAMBDA1 RLAMFAC PHIRATSUF PHIREDLAM NUMLAM [JACUPDATE] [LAMFORGIVE] [DERFORGIVE]
RELPARMAX FACPARMAX FACORIG [IBOUNDSTICK] [UPVECBEND]
PHIREDSWH [NOPTSWITCH] [SPLITSWH] [DOAUI] [DOSENREUSE] [BOUNDSCALE]
NOPTMAX PHIREDSTP NPHISTP NPHINORED RELPARSTP NRELPAR [PHISTOPTHRESH] [LASTRUN] [PHIABANDON]
ICOV ICOR IEIG [IRES] [JCOSAVE] [VERBOSEREC] [JCOSAVEITN] [REISAVEITN] [PARSAVEITN] [PARSAVERUN]""".lower().split(
"\n"
)
REG_VARIABLE_LINES = """PHIMLIM PHIMACCEPT [FRACPHIM] [MEMSAVE]
WFINIT WFMIN WFMAX [LINREG] [REGCONTINUE]
WFFAC WFTOL IREGADJ [NOPTREGADJ REGWEIGHTRAT [REGSINGTHRESH]]""".lower().split(
"\n"
)
REG_DEFAULT_LINES = """ 1.0e-10 1.05e-10 0.1 nomemsave
1.0 1.0e-10 1.0e10 linreg continue
1.3 1.0e-2 1 1.5 1.5 0.5""".lower().split(
"\n"
)
class RegData(object):
"""an object that encapsulates the regularization section
of the PEST control file
"""
def __init__(self):
self.optional_dict = {}
for vline, dline in zip(REG_VARIABLE_LINES, REG_DEFAULT_LINES):
vraw = vline.split()
draw = dline.split()
for v, d in zip(vraw, draw):
o = False
if "[" in v:
o = True
v = v.replace("[", "").replace("]", "")
super(RegData, self).__setattr__(v, d)
self.optional_dict[v] = o
self.should_write = ["phimlim", "phimaccept", "fracphim", "wfinit"]
def write(self, f):
"""write the regularization section to an open
file handle
Args:
f (`file handle`): open file handle for writing
"""
f.write("* regularization\n")
for vline in REG_VARIABLE_LINES:
vraw = vline.strip().split()
for v in vraw:
v = v.replace("[", "").replace("]", "")
if v not in self.optional_dict.keys():
raise Exception("RegData missing attribute {0}".format(v))
f.write("{0} ".format(self.__getattribute__(v)))
f.write("\n")
def write_keyword(self, f):
"""write the regularization section to an open
file handle using the keyword-style format
Args:
f (`file handle`): open file handle for writing
"""
for vline in REG_VARIABLE_LINES:
vraw = vline.strip().split()
for v in vraw:
v = v.replace("[", "").replace("]", "")
if v not in self.should_write:
continue
if v not in self.optional_dict.keys():
raise Exception("RegData missing attribute {0}".format(v))
f.write("{0:30} {1:>10}\n".format(v, self.__getattribute__(v)))
class SvdData(object):
"""encapsulates the singular value decomposition
section of the PEST control file
Args:
kwargs (`dict`): optional keyword arguments
"""
def __init__(self, **kwargs):
self.svdmode = kwargs.pop("svdmode", 1)
self.maxsing = kwargs.pop("maxsing", 10000000)
self.eigthresh = kwargs.pop("eigthresh", 1.0e-6)
self.eigwrite = kwargs.pop("eigwrite", 1)
def write_keyword(self, f):
"""write an SVD section to a file handle using
keyword-style format
Args:
f (`file handle`): open file handle for writing
"""
f.write("{0:30} {1:>10}\n".format("svdmode", self.svdmode))
f.write("{0:30} {1:>10}\n".format("maxsing", self.maxsing))
f.write("{0:30} {1:>10}\n".format("eigthresh", self.eigthresh))
f.write("{0:30} {1:>10}\n".format("eigwrite", self.eigwrite))
def write(self, f):
"""write an SVD section to a file handle
Args:
f (`file handle`): open file handle for writing
"""
f.write("* singular value decomposition\n")
f.write(IFMT(self.svdmode) + "\n")
f.write(IFMT(self.maxsing) + " " + FFMT(self.eigthresh) + "\n")
f.write("{0}\n".format(self.eigwrite))
def parse_values_from_lines(self, lines):
"""parse values from lines of the SVD section of
a PEST control file
Args:
lines ([`strs`]): the raw ASCII lines from the control file
"""
assert (
len(lines) == 3
), "SvdData.parse_values_from_lines: expected " + "3 lines, not {0}".format(
len(lines)
)
try:
self.svdmode = int(lines[0].strip().split()[0])
except Exception as e:
raise Exception(
"SvdData.parse_values_from_lines: error parsing"
+ " svdmode from line {0}: {1} \n".format(lines[0], str(e))
)
try:
raw = lines[1].strip().split()
self.maxsing = int(raw[0])
self.eigthresh = float(raw[1])
except Exception as e:
raise Exception(
"SvdData.parse_values_from_lines: error parsing"
+ " maxsing and eigthresh from line {0}: {1} \n".format(
lines[1], str(e)
)
)
# try:
# self.eigwrite = int(lines[2].strip())
# except Exception as e:
# raise Exception("SvdData.parse_values_from_lines: error parsing" + \
# " eigwrite from line {0}: {1} \n".format(lines[2],str(e)))
self.eigwrite = lines[2].strip()
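# Round-trip sketch (illustrative, not from the original source): the three
# data lines written by SvdData.write() after the section header are exactly
# what parse_values_from_lines() expects back.
#   svd = SvdData(maxsing=100, eigthresh=1.0e-5)
#   with open("svd_section.txt", "w") as f:
#       svd.write(f)                 # header line + 3 data lines
#   svd2 = SvdData()
#   with open("svd_section.txt") as f:
#       svd2.parse_values_from_lines(f.readlines()[1:])  # skip the header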
class ControlData(object):
"""an object that encapsulates the control data section
of the PEST control file
Notes:
This class works hard to protect the variables in the control data section.
It type checks attempts to change values to make sure the type being passed
matches the expected type of the attribute.
"""
def __init__(self):
super(ControlData, self).__setattr__(
"formatters", {np.int32: IFMT, np.float64: FFMT, str: SFMT}
)
super(ControlData, self).__setattr__("_df", self.get_dataframe())
# acceptable values for most optional string inputs
super(ControlData, self).__setattr__(
"accept_values",
{
"doaui": ["aui", "noaui"],
"dosenreuse": ["senreuse", "nosenreuse"],
"boundscale": ["boundscale", "noboundscale"],
"jcosave": ["jcosave", "nojcosave"],
"verboserec": ["verboserec", "noverboserec"],
"jcosaveitn": ["jcosaveitn", "nojcosvaeitn"],
"reisaveitn": ["reisaveitn", "noreisaveitn"],
"parsaveitn": ["parsaveitn", "noparsaveitn"],
"parsaverun": ["parsaverun", "noparsaverun"],
},
)
self._df.index = self._df.name.apply(lambda x: x.replace("[", "")).apply(
lambda x: x.replace("]", "")
)
super(ControlData, self).__setattr__(
"keyword_accessed", ["pestmode", "noptmax"]
)
counters = ["npar", "nobs", "npargp", "nobsgp", "nprior", "ntplfle", "ninsfle"]
super(ControlData, self).__setattr__("counters", counters)
# self.keyword_accessed = ["pestmode","noptmax"]
super(ControlData, self).__setattr__("passed_options", {})
def __setattr__(self, key, value):
if key == "_df":
super(ControlData, self).__setattr__("_df", value)
return
assert key in self._df.index, str(key) + " not found in attributes"
self._df.loc[key, "value"] = self._df.loc[key, "type"](value)
# super(ControlData, self).__getattr__("keyword_accessed").append(key)
if key not in self.counters:
self.keyword_accessed.append(key)
def __getattr__(self, item):
if item == "_df":
return self._df.copy()
assert item in self._df.index, str(item) + " not found in attributes"
return self._df.loc[item, "value"]
@staticmethod
def get_dataframe():
"""get a generic (default) control section as a dataframe
Returns:
`pandas.DataFrame`: a dataframe of control data information
"""
names = []
[names.extend(line.split()) for line in CONTROL_VARIABLE_LINES]
defaults = []
[defaults.extend(line.split()) for line in CONTROL_DEFAULT_LINES]
types, required, cast_defaults, formats = [], [], [], []
for name, default in zip(names, defaults):
if "[" in name or "]" in name:
required.append(False)
else:
required.append(True)
v, t, f = ControlData._parse_value(default)
types.append(t)
formats.append(f)
cast_defaults.append(v)
return pandas.DataFrame(
{
"name": names,
"type": types,
"value": cast_defaults,
"required": required,
"format": formats,
"passed": False,
}
)
@staticmethod
def _parse_value(value):
try:
v = int(value)
t = np.int32
f = IFMT
except Exception as e:
try:
v = float(value)
t = np.float64
f = FFMT
except Exception as ee:
v = value.lower()
t = str
f = SFMT
return v, t, f
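# A few concrete mappings implied by the logic above (illustrative only):
#   "30"      -> (30, numpy.int32, IFMT)
#   "1.0e-2"  -> (0.01, numpy.float64, FFMT)
#   "restart" -> ("restart", str, SFMT)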
def parse_values_from_lines(self, lines, iskeyword=False):
"""cast the string lines for a pest control file into actual inputs
Args:
lines ([`str`]): raw ASCII lines from pest control file
"""
self._df.loc[:, "passed"] = False
if iskeyword:
# self._df.loc[:, "passed"] = True
extra = {}
for line in lines:
raw = line.strip().split()
if len(raw) == 0 or raw[0] == "#":
continue
name = raw[0].strip().lower()
value = raw[1].strip()
v, t, f = self._parse_value(value)
if name not in self._df.index:
extra[name] = v
else:
# if the parsed value's type isn't right
if t != self._df.loc[name, "type"]:
# if a float was expected and an int was returned, that's not a problem
if t == np.int32 and self._df.loc[name, "type"] == np.float64:
self._df.loc[name, "value"] = np.float64(v)
self._df.loc[name, "passed"] = True
# if this is a required input, throw
elif self._df.loc[name, "required"]:
raise Exception(
"wrong type found for variable " + name + ":" + str(t)
)
else:
# else, since this problem is usually a string, check for acceptable values
found = False
for nname, avalues in self.accept_values.items():
if v in avalues:
if t == self._df.loc[nname, "type"]:
self._df.loc[nname, "value"] = v
found = True
self._df.loc[nname, "passed"] = True
break
if not found:
warnings.warn(
"non-conforming value found for "
+ name
+ ":"
+ str(v)
+ "...ignoring",
PyemuWarning,
)
else:
self._df.loc[name, "value"] = v
self._df.loc[name, "passed"] = True
return extra
assert len(lines) == len(
CONTROL_VARIABLE_LINES
), "ControlData error: len of lines not equal to " + str(
len(CONTROL_VARIABLE_LINES)
)
for iline, line in enumerate(lines):
vals = line.strip().split()
names = CONTROL_VARIABLE_LINES[iline].strip().split()
for name, val in zip(names, vals):
v, t, f = self._parse_value(val)
name = name.replace("[", "").replace("]", "")
# if the parsed value's type isn't right
if t != self._df.loc[name, "type"]:
# if a float was expected and an int was returned, that's not a problem
if t == np.int32 and self._df.loc[name, "type"] == np.float64:
self._df.loc[name, "value"] = np.float64(v)
# self._df.loc[name, "passed"] = True
# if this is a required input, throw
elif self._df.loc[name, "required"]:
raise Exception(
"wrong type found for variable " + name + ":" + str(t)
)
else:
# else, since this problem is usually a string, check for acceptable values
found = False
for nname, avalues in self.accept_values.items():
if v in avalues:
if t == self._df.loc[nname, "type"]:
self._df.loc[nname, "value"] = v
found = True
# self._df.loc[nname, "passed"] = True
break
if not found:
warnings.warn(
"non-conforming value found for "
+ name
+ ":"
+ str(v)
+ "...ignoring",
PyemuWarning,
)
else:
self._df.loc[name, "value"] = v
# self._df.loc[name, "passed"] = True
return {}
def copy(self):
cd = ControlData()
cd._df = self._df
return cd
@property
def formatted_values(self):
"""list the entries and current values in the control data section
Returns:
pandas.Series: formatted_values for the control data entries
"""
# passed_df = self._df.copy()
# blank = passed_df.apply(lambda x: x.type==str and x.passed==False,axis=1)
# passed_df.loc[blank,"value"] = ""
return self._df.apply(lambda x: self.formatters[x["type"]](x["value"]), axis=1)
def write_keyword(self, f):
"""write the control data entries to an open file handle
using keyword-style format.
Args:
f (file handle): open file handle to write to
Notes:
only writes values that have been accessed since instantiation
"""
kw = super(ControlData, self).__getattribute__("keyword_accessed")
f.write("* control data keyword\n")
for n, v in zip(self._df.name, self.formatted_values):
if n not in kw:
if n not in self._df.index:
continue
elif not self._df.loc[n, "passed"]:
continue
f.write("{0:30} {1}\n".format(n, v))
def write(self, f):
"""write control data section to a file
Args:
f (file handle): open file handle to write to
"""
if isinstance(f, str):
f = open(f, "w")
f.write("pcf\n")
f.write("* control data\n")
for line in CONTROL_VARIABLE_LINES:
[
f.write(self.formatted_values[name.replace("[", "").replace("]", "")])
for name in line.split()
if self._df.loc[name.replace("[", "").replace("]", ""), "passed"]
== True
or self._df.loc[name.replace("[", "").replace("]", ""), "required"]
== True
]
f.write("\n")
| bsd-3-clause |
zuku1985/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 73 | 1232 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
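# Sanity check of the three branches above (values follow directly from the
# definition; illustrative only):
#   modified_huber_loss(np.array([1., 1., 1.]), np.array([2., 0., -2.]))
#   -> array([0., 1., 8.])   # z >= 1, -1 <= z < 1, z < -1 respectively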
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
lw = 2
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color='gold', lw=lw,
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color='teal', lw=lw,
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), color='yellowgreen', lw=lw,
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), color='cornflowerblue', lw=lw,
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, color='orange', lw=lw,
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), color='darkorchid', lw=lw,
linestyle='--', label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
tulip-control/tulip-control | examples/developer/fuel_tank/continuous_switched_test.py | 1 | 2637 | """test hybrid construction"""
from __future__ import print_function
import logging
logging.basicConfig(level=logging.INFO)
import time
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
from tulip import abstract, hybrid
from polytope import box2poly
input_bound = 0.4
uncertainty = 0.05
cont_state_space = box2poly([[0., 3.], [0., 2.]])
cont_props = {}
cont_props['home'] = box2poly([[0., 1.], [0., 1.]])
cont_props['lot'] = box2poly([[2., 3.], [1., 2.]])
sys_dyn = dict()
allh = [0.5, 1.1, 1.5]
modes = []
modes.append(('normal', 'fly'))
modes.append(('refuel', 'fly'))
modes.append(('emergency', 'fly'))
"""First PWA mode"""
def subsys0(h):
A = np.array([[1.1052, 0.], [ 0., 1.1052]])
B = np.array([[1.1052, 0.], [ 0., 1.1052]])
E = np.array([[1,0], [0,1]])
U = box2poly([[-1., 1.], [-1., 1.]])
U.scale(input_bound)
W = box2poly([[-1., 1.], [-1., 1.]])
W.scale(uncertainty)
dom = box2poly([[0., 3.], [h, 2.]])
sys_dyn = hybrid.LtiSysDyn(A, B, E, None, U, W, dom)
return sys_dyn
def subsys1(h):
A = np.array([[0.9948, 0.], [0., 1.1052]])
B = np.array([[-1.1052, 0.], [0., 1.1052]])
E = np.array([[1, 0], [0, 1]])
U = box2poly([[-1., 1.], [-1., 1.]])
U.scale(input_bound)
W = box2poly([[-1., 1.], [-1., 1.]])
W.scale(uncertainty)
dom = box2poly([[0., 3.], [0., h]])
sys_dyn = hybrid.LtiSysDyn(A, B, E, None, U, W, dom)
return sys_dyn
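# Both helpers above build discrete-time affine dynamics of the form
#   x[t+1] = A x[t] + B u[t] + E w[t]
# with inputs u restricted to U (scaled by input_bound), disturbances w in W
# (scaled by uncertainty), and validity domain `dom` split at height h.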
for mode, h in zip(modes, allh):
subsystems = [subsys0(h), subsys1(h)]
sys_dyn[mode] = hybrid.PwaSysDyn(subsystems, cont_state_space)
"""Switched Dynamics"""
# collect env, sys_modes
env_modes, sys_modes = zip(*modes)
msg = 'Found:\n'
msg += '\t Environment modes: ' + str(env_modes)
msg += '\t System modes: ' + str(sys_modes)
switched_dynamics = hybrid.SwitchedSysDyn(
disc_domain_size=(len(env_modes), len(sys_modes)),
dynamics=sys_dyn,
env_labels=env_modes,
disc_sys_labels=sys_modes,
cts_ss=cont_state_space
)
print(switched_dynamics)
ppp = abstract.prop2part(cont_state_space, cont_props)
ppp, new2old = abstract.part2convex(ppp)
"""Discretize to establish transitions"""
start = time.time()
N = 8
trans_len=1
disc_params = {}
for mode in modes:
disc_params[mode] = {'N':N, 'trans_length':trans_len}
swab = abstract.multiproc_discretize_switched(
ppp, switched_dynamics, disc_params,
plot=True, show_ts=True
)
print(swab)
axs = swab.plot(show_ts=True)
for i, ax in enumerate(axs):
ax.figure.savefig('swab_' + str(i) + '.pdf')
#ax = sys_ts.ts.plot()
elapsed = (time.time() - start)
print('Discretization lasted: ' + str(elapsed))
| bsd-3-clause |
qifeigit/scikit-learn | doc/sphinxext/gen_rst.py | 142 | 40026 | """
Example generation for scikit-learn
Generate the rst files for the examples by iterating over the Python
example files.
Files that generate images should start with 'plot'.
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
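# Usage sketch (hypothetical file name): mirror everything written to stdout
# into a log file while still printing it to the console.
#   log_file = open('_build/example_run.log', 'w')
#   sys.stdout = Tee(sys.stdout, log_file)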
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
                                 'URLs (doc_url cannot start with "http://")')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
            cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
                # replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
if six.PY2:
lines = open(filename).readlines()
else:
lines = open(filename, encoding='utf-8').readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery.\n"
"Please check the layout of your"
" example file:\n {}\n and make sure"
" it's correct".format(filename))
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
if six.PY2:
lines = open(example_file).readlines()
else:
lines = open(example_file, encoding='utf-8').readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
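# Usage sketch (illustrative only; the file names are hypothetical): the
# helper rescales the image to fit, then pastes it centered on a white
# width x height canvas, e.g.
#   make_thumbnail('images/plot_foo_001.png', 'images/thumb/plot_foo.png', 400, 280)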
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
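# Illustrative example (the module path shown is historical and only for
# illustration): the loop keeps the shortest dotted prefix from which the
# object can still be imported.
# >>> get_short_module_name('sklearn.linear_model.logistic', 'LogisticRegression')
# 'sklearn.linear_model'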
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
                    # don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
fig.savefig(image_path % fig_mngr.num, **kwargs)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
            print('%s failed to execute:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally these should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
if six.PY2:
example_code_obj = identify_names(open(example_file).read())
else:
example_code_obj = \
identify_names(open(example_file, encoding='utf-8').read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
if app.builder.name == 'latex':
# Don't embed hyperlinks when a latex builder is used.
return
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
resolver_urls = {
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
}
for this_module, url in resolver_urls.items():
try:
doc_resolvers[this_module] = SphinxDocLinkResolver(url)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
print("The following error has occurred:\n")
print(repr(e))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
| bsd-3-clause |
cfei18/incubator-airflow | setup.py | 1 | 11546 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
import imp
import logging
import os
import sys
logger = logging.getLogger(__name__)
# Kept manually in sync with airflow.__version__
version = imp.load_source(
'airflow.version', os.path.join('airflow', 'version.py')).version
PY3 = sys.version_info[0] == 3
class Tox(TestCommand):
user_options = [('tox-args=', None, "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import tox
errno = tox.cmdline(args=self.tox_args.split())
sys.exit(errno)
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
def git_version(version):
"""
Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
    release tag or not: it will indicate the former with a 'release:{version}' prefix
and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted
changes are present.
"""
repo = None
try:
import git
repo = git.Repo('.git')
except ImportError:
logger.warning('gitpython not found: Cannot compute the git version.')
return ''
except Exception as e:
logger.warning('Cannot compute the git version. {}'.format(e))
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '.dev0+{sha}.dirty'.format(sha=sha)
# commit is clean
return '.release:{version}+{sha}'.format(version=version, sha=sha)
else:
return 'no_git_version'
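# Illustrative return values (hypothetical shas, listed only to document the
# formats produced above):
#   '.dev0+3a1f2e9.dirty'       -> uncommitted changes in the working tree
#   '.release:1.10.0+3a1f2e9'   -> clean tree checked out at version 1.10.0
#   ''                          -> gitpython missing or the git lookup failed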
def write_version(filename=os.path.join(*['airflow',
'git_version'])):
text = "{}".format(git_version(version))
with open(filename, 'w') as a:
a.write(text)
async = [
'greenlet>=0.4.9',
'eventlet>= 0.9.7',
'gevent>=0.13'
]
atlas = ['atlasclient>=0.1.2']
azure_blob_storage = ['azure-storage>=0.34.0']
azure_data_lake = [
'azure-mgmt-resource==1.2.2',
'azure-mgmt-datalake-store==0.4.0',
'azure-datalake-store==0.0.19'
]
cassandra = ['cassandra-driver>=3.13.0']
celery = [
'celery>=4.1.1, <4.2.0',
'flower>=0.7.3, <1.0'
]
cgroups = [
'cgroupspy>=0.1.4',
]
# major update coming soon, clamp to 0.x
cloudant = ['cloudant>=0.5.9,<2.0']
crypto = ['cryptography>=0.9.3']
dask = [
'distributed>=1.17.1, <2'
]
databricks = ['requests>=2.5.1, <3']
datadog = ['datadog>=0.14.0']
doc = [
'sphinx>=1.2.3',
'sphinx-argparse>=0.1.13',
'sphinx-rtd-theme>=0.1.6',
'Sphinx-PyPI-upload>=0.2.1'
]
docker = ['docker>=2.0.0']
druid = ['pydruid>=0.4.1']
elasticsearch = [
'elasticsearch>=5.0.0,<6.0.0',
'elasticsearch-dsl>=5.0.0,<6.0.0'
]
emr = ['boto3>=1.0.0']
gcp_api = [
'httplib2>=0.9.2',
'google-api-python-client>=1.6.0, <2.0.0dev',
'google-auth>=1.0.0, <2.0.0dev',
'google-auth-httplib2>=0.0.1',
'google-cloud-container>=0.1.1',
'PyOpenSSL',
'pandas-gbq'
]
github_enterprise = ['Flask-OAuthlib>=0.9.1']
hdfs = ['snakebite>=2.7.8']
hive = [
'hmsclient>=0.1.0',
'pyhive>=0.6.0',
]
jdbc = ['jaydebeapi>=1.1.1']
jenkins = ['python-jenkins>=0.4.15']
jira = ['JIRA>1.0.7']
kerberos = ['pykerberos>=1.1.13',
'requests_kerberos>=0.10.0',
'thrift_sasl>=0.2.0',
'snakebite[kerberos]>=2.7.8']
kubernetes = ['kubernetes>=3.0.0',
'cryptography>=2.0.0']
ldap = ['ldap3>=0.9.9.1']
mssql = ['pymssql>=2.1.1']
mysql = ['mysqlclient>=1.3.6']
oracle = ['cx_Oracle>=5.1.2']
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
pinot = ['pinotdb>=0.1.1']
postgres = ['psycopg2-binary>=2.7.4']
qds = ['qds-sdk>=1.9.6']
rabbitmq = ['librabbitmq>=1.6.1']
redis = ['redis>=2.10.5']
s3 = ['boto3>=1.7.0']
salesforce = ['simple-salesforce>=0.72']
samba = ['pysmbclient>=0.1.3']
segment = ['analytics-python>=1.2.9']
sendgrid = ['sendgrid>=5.2.0']
slack = ['slackclient>=1.0.0']
mongo = ['pymongo>=3.6.0']
snowflake = ['snowflake-connector-python>=1.5.2',
'snowflake-sqlalchemy>=1.1.0']
ssh = ['paramiko>=2.1.1', 'pysftp>=0.2.9']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
webhdfs = ['hdfs[dataframe,avro,kerberos]>=2.0.4']
winrm = ['pywinrm==0.2.2']
zendesk = ['zdesk']
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica + cloudant + druid + pinot \
+ cassandra + mongo
devel = [
'click',
'freezegun',
'jira',
'lxml>=3.3.4',
'mock',
'mongomock',
'moto==1.1.19',
'nose',
'nose-ignore-docstring==0.2',
'nose-timer',
'parameterized',
'paramiko',
'pysftp',
'pywinrm',
'qds-sdk>=1.9.6',
'rednose',
'requests_mock'
]
devel_minreq = devel + kubernetes + mysql + doc + password + s3 + cgroups
devel_hadoop = devel_minreq + hive + hdfs + webhdfs + kerberos
devel_all = (sendgrid + devel + all_dbs + doc + samba + s3 + slack + crypto + oracle +
docker + ssh + kubernetes + celery + azure_blob_storage + redis + gcp_api +
datadog + zendesk + jdbc + ldap + kerberos + password + webhdfs + jenkins +
druid + pinot + segment + snowflake + elasticsearch + azure_data_lake +
atlas)
# Snakebite & Google Cloud Dataflow are not Python 3 compatible :'(
if PY3:
devel_ci = [package for package in devel_all if package not in
['snakebite>=2.7.8', 'snakebite[kerberos]>=2.7.8']]
else:
devel_ci = devel_all
def do_setup():
write_version()
setup(
name='apache-airflow',
description='Programmatically author, schedule and monitor data pipelines',
license='Apache License 2.0',
version=version,
packages=find_packages(exclude=['tests*']),
package_data={'': ['airflow/alembic.ini', "airflow/git_version"]},
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'alembic>=0.8.3, <0.9',
'bleach~=2.1.3',
'configparser>=3.5.0, <3.6.0',
'croniter>=0.3.17, <0.4',
'dill>=0.2.2, <0.3',
'flask>=0.12.4, <0.13',
'flask-appbuilder>=1.11.1, <2.0.0',
'flask-admin==1.4.1',
'flask-caching>=1.3.3, <1.4.0',
'flask-login==0.2.11',
'flask-swagger==0.2.13',
'flask-wtf>=0.14.2, <0.15',
'funcsigs==1.0.0',
'future>=0.16.0, <0.17',
'gitpython>=2.0.2',
'gunicorn>=19.4.0, <20.0',
'iso8601>=0.1.12',
'jinja2>=2.7.3, <2.9.0',
'lxml>=3.6.0, <4.0',
'markdown>=2.5.2, <3.0',
'pandas>=0.17.1, <1.0.0',
'pendulum==1.4.4',
'psutil>=4.2.0, <5.0.0',
'pygments>=2.0.1, <3.0',
'python-daemon>=2.1.1, <2.2',
'python-dateutil>=2.3, <3',
'python-nvd3==0.15.0',
'requests>=2.5.1, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy>=1.1.15, <1.2.0',
'sqlalchemy-utc>=0.9.0',
'tabulate>=0.7.5, <0.8.0',
'tenacity==4.8.0',
'thrift>=0.9.2',
'tzlocal>=1.4',
'unicodecsv>=0.14.1',
'werkzeug>=0.14.1, <0.15.0',
'zope.deprecation>=4.0, <5.0',
],
setup_requires=[
'docutils>=0.14, <1.0',
],
extras_require={
'all': devel_all,
'devel_ci': devel_ci,
'all_dbs': all_dbs,
'atlas': atlas,
'async': async,
'azure_blob_storage': azure_blob_storage,
'azure_data_lake': azure_data_lake,
'cassandra': cassandra,
'celery': celery,
'cgroups': cgroups,
'cloudant': cloudant,
'crypto': crypto,
'dask': dask,
'databricks': databricks,
'datadog': datadog,
'devel': devel_minreq,
'devel_hadoop': devel_hadoop,
'doc': doc,
'docker': docker,
'druid': druid,
'elasticsearch': elasticsearch,
'emr': emr,
'gcp_api': gcp_api,
'github_enterprise': github_enterprise,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'jira': jira,
'kerberos': kerberos,
'kubernetes': kubernetes,
'ldap': ldap,
'mongo': mongo,
'mssql': mssql,
'mysql': mysql,
'oracle': oracle,
'password': password,
'pinot': pinot,
'postgres': postgres,
'qds': qds,
'rabbitmq': rabbitmq,
'redis': redis,
's3': s3,
'salesforce': salesforce,
'samba': samba,
'sendgrid': sendgrid,
'segment': segment,
'slack': slack,
'snowflake': snowflake,
'ssh': ssh,
'statsd': statsd,
'vertica': vertica,
'webhdfs': webhdfs,
'winrm': winrm
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: System :: Monitoring',
],
author='Apache Software Foundation',
author_email='dev@airflow.incubator.apache.org',
url='http://airflow.incubator.apache.org/',
download_url=(
'https://dist.apache.org/repos/dist/release/incubator/airflow/' + version),
cmdclass={
'test': Tox,
'extra_clean': CleanCommand,
},
)
if __name__ == "__main__":
do_setup()
| apache-2.0 |
wazeerzulfikar/scikit-learn | examples/linear_model/plot_logistic_path.py | 37 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1-Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
# #############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
mayblue9/scikit-learn | sklearn/ensemble/tests/test_voting_classifier.py | 140 | 6926 | """Testing for the VotingClassifier (sklearn.ensemble.VotingClassifier)."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.grid_search import GridSearchCV
from sklearn import datasets
from sklearn import cross_validation
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
assert_equal(all(clf1.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf2.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf3.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
| bsd-3-clause |
DonBeo/statsmodels | statsmodels/nonparametric/_kernel_base.py | 29 | 18238 | """
Module containing the base object for multivariate kernel density and
regression, plus some utilities.
"""
from statsmodels.compat.python import range, string_types
import copy
import numpy as np
from scipy import optimize
from scipy.stats.mstats import mquantiles
try:
import joblib
has_joblib = True
except ImportError:
has_joblib = False
from . import kernels
kernel_func = dict(wangryzin=kernels.wang_ryzin,
aitchisonaitken=kernels.aitchison_aitken,
gaussian=kernels.gaussian,
aitchison_aitken_reg = kernels.aitchison_aitken_reg,
wangryzin_reg = kernels.wang_ryzin_reg,
gauss_convolution=kernels.gaussian_convolution,
wangryzin_convolution=kernels.wang_ryzin_convolution,
aitchisonaitken_convolution=kernels.aitchison_aitken_convolution,
gaussian_cdf=kernels.gaussian_cdf,
aitchisonaitken_cdf=kernels.aitchison_aitken_cdf,
wangryzin_cdf=kernels.wang_ryzin_cdf,
d_gaussian=kernels.d_gaussian)
def _compute_min_std_IQR(data):
"""Compute minimum of std and IQR for each variable."""
s1 = np.std(data, axis=0)
q75 = mquantiles(data, 0.75, axis=0).data[0]
q25 = mquantiles(data, 0.25, axis=0).data[0]
s2 = (q75 - q25) / 1.349 # IQR
dispersion = np.minimum(s1, s2)
return dispersion
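# Doctest-style sketch added for clarity (approximate output, not asserted):
# for roughly standard-normal columns both the standard deviation and
# IQR/1.349 are close to 1, so the returned dispersion is close to 1 as well.
# >>> rng = np.random.RandomState(0)
# >>> _compute_min_std_IQR(rng.normal(size=(1000, 2)))  # ~ array([1., 1.])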
def _compute_subset(class_type, data, bw, co, do, n_cvars, ix_ord,
ix_unord, n_sub, class_vars, randomize, bound):
""""Compute bw on subset of data.
Called from ``GenericKDE._compute_efficient_*``.
Notes
-----
Needs to be outside the class in order for joblib to be able to pickle it.
"""
if randomize:
np.random.shuffle(data)
sub_data = data[:n_sub, :]
else:
sub_data = data[bound[0]:bound[1], :]
if class_type == 'KDEMultivariate':
from .kernel_density import KDEMultivariate
var_type = class_vars[0]
sub_model = KDEMultivariate(sub_data, var_type, bw=bw,
defaults=EstimatorSettings(efficient=False))
elif class_type == 'KDEMultivariateConditional':
from .kernel_density import KDEMultivariateConditional
k_dep, dep_type, indep_type = class_vars
endog = sub_data[:, :k_dep]
exog = sub_data[:, k_dep:]
sub_model = KDEMultivariateConditional(endog, exog, dep_type,
indep_type, bw=bw, defaults=EstimatorSettings(efficient=False))
elif class_type == 'KernelReg':
from .kernel_regression import KernelReg
var_type, k_vars, reg_type = class_vars
endog = _adjust_shape(sub_data[:, 0], 1)
exog = _adjust_shape(sub_data[:, 1:], k_vars)
sub_model = KernelReg(endog=endog, exog=exog, reg_type=reg_type,
var_type=var_type, bw=bw,
defaults=EstimatorSettings(efficient=False))
else:
raise ValueError("class_type not recognized, should be one of " \
"{KDEMultivariate, KDEMultivariateConditional, KernelReg}")
# Compute dispersion in next 4 lines
if class_type == 'KernelReg':
sub_data = sub_data[:, 1:]
dispersion = _compute_min_std_IQR(sub_data)
fct = dispersion * n_sub**(-1. / (n_cvars + co))
fct[ix_unord] = n_sub**(-2. / (n_cvars + do))
fct[ix_ord] = n_sub**(-2. / (n_cvars + do))
sample_scale_sub = sub_model.bw / fct #TODO: check if correct
bw_sub = sub_model.bw
return sample_scale_sub, bw_sub
class GenericKDE (object):
"""
Base class for density estimation and regression KDE classes.
"""
def _compute_bw(self, bw):
"""
Computes the bandwidth of the data.
Parameters
----------
bw: array_like or str
If array_like: user-specified bandwidth.
If a string, should be one of:
- cv_ml: cross validation maximum likelihood
- normal_reference: normal reference rule of thumb
- cv_ls: cross validation least squares
Notes
-----
The default values for bw is 'normal_reference'.
"""
self.bw_func = dict(normal_reference=self._normal_reference,
cv_ml=self._cv_ml, cv_ls=self._cv_ls)
if bw is None:
bwfunc = self.bw_func['normal_reference']
return bwfunc()
if not isinstance(bw, string_types):
self._bw_method = "user-specified"
res = np.asarray(bw)
else:
# The user specified a bandwidth selection method
self._bw_method = bw
bwfunc = self.bw_func[bw]
res = bwfunc()
return res
def _compute_dispersion(self, data):
"""
Computes the measure of dispersion.
The minimum of the standard deviation and interquartile range / 1.349
Notes
-----
Reimplemented in `KernelReg`, because the first column of `data` has to
be removed.
References
----------
See the user guide for the np package in R.
In the notes on bwscaling option in npreg, npudens, npcdens there is
a discussion on the measure of dispersion
"""
return _compute_min_std_IQR(data)
def _get_class_vars_type(self):
"""Helper method to be able to pass needed vars to _compute_subset.
Needs to be implemented by subclasses."""
pass
def _compute_efficient(self, bw):
"""
Computes the bandwidth by estimating the scaling factor (c)
in n_res resamples of size ``n_sub`` (in `randomize` case), or by
dividing ``nobs`` into as many ``n_sub`` blocks as needed (if
`randomize` is False).
References
----------
See p.9 in socserv.mcmaster.ca/racine/np_faq.pdf
"""
if bw is None:
self._bw_method = 'normal_reference'
if isinstance(bw, string_types):
self._bw_method = bw
else:
self._bw_method = "user-specified"
return bw
nobs = self.nobs
n_sub = self.n_sub
data = copy.deepcopy(self.data)
n_cvars = self.data_type.count('c')
co = 4 # 2*order of continuous kernel
do = 4 # 2*order of discrete kernel
_, ix_ord, ix_unord = _get_type_pos(self.data_type)
# Define bounds for slicing the data
if self.randomize:
# randomize chooses blocks of size n_sub, independent of nobs
bounds = [None] * self.n_res
else:
bounds = [(i * n_sub, (i+1) * n_sub) for i in range(nobs // n_sub)]
if nobs % n_sub > 0:
bounds.append((nobs - nobs % n_sub, nobs))
n_blocks = self.n_res if self.randomize else len(bounds)
sample_scale = np.empty((n_blocks, self.k_vars))
only_bw = np.empty((n_blocks, self.k_vars))
class_type, class_vars = self._get_class_vars_type()
if has_joblib:
# `res` is a list of tuples (sample_scale_sub, bw_sub)
res = joblib.Parallel(n_jobs=self.n_jobs) \
(joblib.delayed(_compute_subset) \
(class_type, data, bw, co, do, n_cvars, ix_ord, ix_unord, \
n_sub, class_vars, self.randomize, bounds[i]) \
for i in range(n_blocks))
else:
res = []
for i in range(n_blocks):
res.append(_compute_subset(class_type, data, bw, co, do,
n_cvars, ix_ord, ix_unord, n_sub,
class_vars, self.randomize,
bounds[i]))
for i in range(n_blocks):
sample_scale[i, :] = res[i][0]
only_bw[i, :] = res[i][1]
s = self._compute_dispersion(data)
order_func = np.median if self.return_median else np.mean
m_scale = order_func(sample_scale, axis=0)
# TODO: Check if 1/5 is correct in line below!
bw = m_scale * s * nobs**(-1. / (n_cvars + co))
bw[ix_ord] = m_scale[ix_ord] * nobs**(-2./ (n_cvars + do))
bw[ix_unord] = m_scale[ix_unord] * nobs**(-2./ (n_cvars + do))
if self.return_only_bw:
bw = np.median(only_bw, axis=0)
return bw
def _set_defaults(self, defaults):
"""Sets the default values for the efficient estimation"""
self.n_res = defaults.n_res
self.n_sub = defaults.n_sub
self.randomize = defaults.randomize
self.return_median = defaults.return_median
self.efficient = defaults.efficient
self.return_only_bw = defaults.return_only_bw
self.n_jobs = defaults.n_jobs
def _normal_reference(self):
"""
Returns Scott's normal reference rule of thumb bandwidth parameter.
Notes
-----
See p.13 in [2] for an example and discussion. The formula for the
bandwidth is
.. math:: h = 1.06n^{-1/(4+q)}
where ``n`` is the number of observations and ``q`` is the number of
variables.
"""
X = np.std(self.data, axis=0)
return 1.06 * X * self.nobs ** (- 1. / (4 + self.data.shape[1]))
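    # Worked example of the rule of thumb above (rounded, for illustration
    # only): with nobs = 100 observations, q = 2 variables and unit standard
    # deviation, h = 1.06 * 1 * 100 ** (-1. / 6) ~= 0.49 for every variable.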
def _set_bw_bounds(self, bw):
"""
        Sets bandwidth lower bound to effectively zero (1e-10), and for
discrete values upper bound to 1.
"""
bw[bw < 0] = 1e-10
_, ix_ord, ix_unord = _get_type_pos(self.data_type)
bw[ix_ord] = np.minimum(bw[ix_ord], 1.)
bw[ix_unord] = np.minimum(bw[ix_unord], 1.)
return bw
def _cv_ml(self):
"""
Returns the cross validation maximum likelihood bandwidth parameter.
Notes
-----
For more details see p.16, 18, 27 in Ref. [1] (see module docstring).
        Returns the bandwidth estimate that maximizes the leave-one-out
likelihood. The leave-one-out log likelihood function is:
.. math:: \ln L=\sum_{i=1}^{n}\ln f_{-i}(X_{i})
The leave-one-out kernel estimator of :math:`f_{-i}` is:
.. math:: f_{-i}(X_{i})=\frac{1}{(n-1)h}
\sum_{j=1,j\neq i}K_{h}(X_{i},X_{j})
where :math:`K_{h}` represents the Generalized product kernel
estimator:
.. math:: K_{h}(X_{i},X_{j})=\prod_{s=1}^
{q}h_{s}^{-1}k\left(\frac{X_{is}-X_{js}}{h_{s}}\right)
"""
# the initial value for the optimization is the normal_reference
h0 = self._normal_reference()
bw = optimize.fmin(self.loo_likelihood, x0=h0, args=(np.log, ),
maxiter=1e3, maxfun=1e3, disp=0, xtol=1e-3)
bw = self._set_bw_bounds(bw) # bound bw if necessary
return bw
def _cv_ls(self):
"""
Returns the cross-validation least squares bandwidth parameter(s).
Notes
-----
For more details see pp. 16, 27 in Ref. [1] (see module docstring).
        Returns the value of the bandwidth that minimizes the integrated mean
square error between the estimated and actual distribution. The
integrated mean square error (IMSE) is given by:
.. math:: \int\left[\hat{f}(x)-f(x)\right]^{2}dx
This is the general formula for the IMSE. The IMSE differs for
conditional (``KDEMultivariateConditional``) and unconditional
(``KDEMultivariate``) kernel density estimation.
"""
h0 = self._normal_reference()
bw = optimize.fmin(self.imse, x0=h0, maxiter=1e3, maxfun=1e3, disp=0,
xtol=1e-3)
bw = self._set_bw_bounds(bw) # bound bw if necessary
return bw
def loo_likelihood(self):
raise NotImplementedError
class EstimatorSettings(object):
"""
Object to specify settings for density estimation or regression.
    `EstimatorSettings` has several properties related to how bandwidth
estimation for the `KDEMultivariate`, `KDEMultivariateConditional`,
`KernelReg` and `CensoredKernelReg` classes behaves.
Parameters
----------
efficient: bool, optional
If True, the bandwidth estimation is to be performed
efficiently -- by taking smaller sub-samples and estimating
the scaling factor of each subsample. This is useful for large
samples (nobs >> 300) and/or multiple variables (k_vars > 3).
If False (default), all data is used at the same time.
randomize: bool, optional
If True, the bandwidth estimation is to be performed by
taking `n_res` random resamples (with replacement) of size `n_sub` from
the full sample. If set to False (default), the estimation is
performed by slicing the full sample in sub-samples of size `n_sub` so
that all samples are used once.
n_sub: int, optional
Size of the sub-samples. Default is 50.
n_res: int, optional
The number of random re-samples used to estimate the bandwidth.
Only has an effect if ``randomize == True``. Default value is 25.
return_median: bool, optional
If True (default), the estimator uses the median of all scaling factors
for each sub-sample to estimate the bandwidth of the full sample.
If False, the estimator uses the mean.
return_only_bw: bool, optional
If True, the estimator is to use the bandwidth and not the
scaling factor. This is *not* theoretically justified.
Should be used only for experimenting.
n_jobs : int, optional
The number of jobs to use for parallel estimation with
``joblib.Parallel``. Default is -1, meaning ``n_cores - 1``, with
``n_cores`` the number of available CPU cores.
See the `joblib documentation
<https://pythonhosted.org/joblib/parallel.html>`_ for more details.
Examples
--------
>>> settings = EstimatorSettings(randomize=True, n_jobs=3)
>>> k_dens = KDEMultivariate(data, var_type, defaults=settings)
"""
def __init__(self, efficient=False, randomize=False, n_res=25, n_sub=50,
return_median=True, return_only_bw=False, n_jobs=-1):
self.efficient = efficient
self.randomize = randomize
self.n_res = n_res
self.n_sub = n_sub
self.return_median = return_median
self.return_only_bw = return_only_bw # TODO: remove this?
self.n_jobs = n_jobs
class LeaveOneOut(object):
"""
Generator to give leave-one-out views on X.
Parameters
----------
X : array-like
2-D array.
Examples
--------
>>> X = np.random.normal(0, 1, [10,2])
>>> loo = LeaveOneOut(X)
>>> for x in loo:
... print x
Notes
-----
A little lighter weight than sklearn LOO. We don't need test index.
Also passes views on X, not the index.
"""
def __init__(self, X):
self.X = np.asarray(X)
def __iter__(self):
X = self.X
nobs, k_vars = np.shape(X)
for i in range(nobs):
index = np.ones(nobs, dtype=np.bool)
index[i] = False
yield X[index, :]
def _get_type_pos(var_type):
ix_cont = np.array([c == 'c' for c in var_type])
ix_ord = np.array([c == 'o' for c in var_type])
ix_unord = np.array([c == 'u' for c in var_type])
return ix_cont, ix_ord, ix_unord
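# Illustrative example (added for clarity): for a variable-type string such as
# 'cco' (two continuous columns, one ordered column) the boolean masks mark
# columns 0-1 as continuous, column 2 as ordered and none as unordered.
# >>> ix_cont, ix_ord, ix_unord = _get_type_pos('cco')
# >>> ix_cont.tolist(), ix_ord.tolist(), ix_unord.tolist()
# ([True, True, False], [False, False, True], [False, False, False])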
def _adjust_shape(dat, k_vars):
""" Returns an array of shape (nobs, k_vars) for use with `gpke`."""
dat = np.asarray(dat)
if dat.ndim > 2:
dat = np.squeeze(dat)
if dat.ndim == 1 and k_vars > 1: # one obs many vars
nobs = 1
elif dat.ndim == 1 and k_vars == 1: # one obs one var
nobs = len(dat)
else:
if np.shape(dat)[0] == k_vars and np.shape(dat)[1] != k_vars:
dat = dat.T
nobs = np.shape(dat)[0] # ndim >1 so many obs many vars
dat = np.reshape(dat, (nobs, k_vars))
return dat
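# Illustrative note (not in the original source), following the branches above:
# with k_vars=2 a flat input like [1, 2] is read as one observation of two
# variables and becomes shape (1, 2); with k_vars=1 a flat input like [1, 2, 3]
# is read as three observations and becomes shape (3, 1).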
def gpke(bw, data, data_predict, var_type, ckertype='gaussian',
okertype='wangryzin', ukertype='aitchisonaitken', tosum=True):
"""
Returns the non-normalized Generalized Product Kernel Estimator
Parameters
----------
bw: 1-D ndarray
The user-specified bandwidth parameters.
data: 1D or 2-D ndarray
The training data.
data_predict: 1-D ndarray
The evaluation points at which the kernel estimation is performed.
var_type: str
The variable type (continuous, ordered, unordered).
ckertype: str, optional
The kernel used for the continuous variables.
okertype: str, optional
The kernel used for the ordered discrete variables.
ukertype: str, optional
The kernel used for the unordered discrete variables.
tosum : bool, optional
Whether or not to sum the calculated array of densities. Default is
True.
Returns
-------
dens: array-like
The generalized product kernel density estimator.
Notes
-----
The formula for the multivariate kernel estimator for the pdf is:
.. math:: f(x)=\frac{1}{nh_{1}...h_{q}}\sum_{i=1}^
{n}K\left(\frac{X_{i}-x}{h}\right)
where
.. math:: K\left(\frac{X_{i}-x}{h}\right) =
k\left( \frac{X_{i1}-x_{1}}{h_{1}}\right)\times
k\left( \frac{X_{i2}-x_{2}}{h_{2}}\right)\times...\times
k\left(\frac{X_{iq}-x_{q}}{h_{q}}\right)
"""
kertypes = dict(c=ckertype, o=okertype, u=ukertype)
#Kval = []
#for ii, vtype in enumerate(var_type):
# func = kernel_func[kertypes[vtype]]
# Kval.append(func(bw[ii], data[:, ii], data_predict[ii]))
#Kval = np.column_stack(Kval)
Kval = np.empty(data.shape)
for ii, vtype in enumerate(var_type):
func = kernel_func[kertypes[vtype]]
Kval[:, ii] = func(bw[ii], data[:, ii], data_predict[ii])
iscontinuous = np.array([c == 'c' for c in var_type])
dens = Kval.prod(axis=1) / np.prod(bw[iscontinuous])
if tosum:
return dens.sum(axis=0)
else:
return dens
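# Illustrative sketch (not part of the original module): a minimal call to
# `gpke` for two continuous variables. The data values and bandwidths below are
# made up, and the sketch assumes the module-level `kernel_func` mapping used
# inside `gpke` is available, as it is elsewhere in this file.
def _gpke_example():
    bw = np.array([0.5, 0.5])
    data = np.array([[0.0, 1.0], [1.0, 2.0], [2.0, 3.0]])  # (nobs=3, k_vars=2)
    data_predict = np.array([1.0, 2.0])  # one evaluation point
    # non-normalized product-kernel density estimate at `data_predict`
    return gpke(bw, data=data, data_predict=data_predict, var_type='cc')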
| bsd-3-clause |
ominux/scikit-learn | sklearn/neighbors/graph.py | 14 | 2839 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD, (C) INRIA, University of Amsterdam
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def kneighbors_graph(X, n_neighbors, mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, and 'distance' will
return the edges as the Euclidean distances between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.todense()
matrix([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors).fit(X)
return X.kneighbors_graph(X._fit_X, n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted to the points at a distance lower than
radius.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, and 'distance' will
return the edges as the Euclidean distances between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.todense()
matrix([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius).fit(X)
return X.radius_neighbors_graph(X._fit_X, radius, mode)
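# Illustrative note (not from the scikit-learn docs): with mode='distance' the
# returned matrix holds Euclidean distances instead of ones, e.g.
# A = radius_neighbors_graph(X, 1.5, mode='distance')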
| bsd-3-clause |
trungnt13/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
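# Illustrative usage sketch (not from the scikit-learn docs; toy numbers):
# >>> from sklearn.linear_model import Ridge
# >>> reg = Ridge(alpha=1.0).fit([[0.0], [1.0], [2.0]], [0.0, 1.0, 2.0])
# >>> reg.predict([[1.5]])  # about 1.33 for this tiny data set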
| bsd-3-clause |
architecture-building-systems/CEAforArcGIS | cea/technologies/heat_exchangers.py | 2 | 6169 | """
heat exchangers
"""
from math import log, ceil
import pandas as pd
import numpy as np
from cea.constants import HEAT_CAPACITY_OF_WATER_JPERKGK
from cea.technologies.constants import MAX_NODE_FLOW
from cea.analysis.costs.equations import calc_capex_annualized, calc_opex_annualized
__author__ = "Thuy-An Nguyen"
__copyright__ = "Copyright 2015, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Thuy-An Nguyen", "Tim Vollrath", "Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
# investment and maintenance costs
def calc_Cinv_HEX(Q_design_W, locator, config, technology_type):
"""
Calculates the cost of a heat exchanger (based on A+W cost of oil boilers) [CHF / a]
:type Q_design_W : float
:param Q_design_W: Design load of the heat exchanger
:rtype InvC_return : float
:returns InvC_return: total investment Cost in [CHF]
:rtype InvCa : float
:returns InvCa: annualized investment costs in [CHF/a]
"""
if Q_design_W > 0:
HEX_cost_data = pd.read_excel(locator.get_database_conversion_systems(), sheet_name="HEX")
HEX_cost_data = HEX_cost_data[HEX_cost_data['code'] == technology_type]
# if the Q_design is below the lowest capacity available for the technology, then it is replaced by the least
# capacity for the corresponding technology from the database
if Q_design_W < HEX_cost_data.iloc[0]['cap_min']:
Q_design_W = HEX_cost_data.iloc[0]['cap_min']
HEX_cost_data = HEX_cost_data[
(HEX_cost_data['cap_min'] <= Q_design_W) & (HEX_cost_data['cap_max'] > Q_design_W)]
Inv_a = HEX_cost_data.iloc[0]['a']
Inv_b = HEX_cost_data.iloc[0]['b']
Inv_c = HEX_cost_data.iloc[0]['c']
Inv_d = HEX_cost_data.iloc[0]['d']
Inv_e = HEX_cost_data.iloc[0]['e']
Inv_IR = HEX_cost_data.iloc[0]['IR_%']
Inv_LT = HEX_cost_data.iloc[0]['LT_yr']
Inv_OM = HEX_cost_data.iloc[0]['O&M_%'] / 100
InvC = Inv_a + Inv_b * (Q_design_W) ** Inv_c + (Inv_d + Inv_e * Q_design_W) * log(Q_design_W)
Capex_a_HEX_USD = calc_capex_annualized(InvC, Inv_IR, Inv_LT)
Opex_fixed_HEX_USD = InvC * Inv_OM
Capex_HEX_USD = InvC
else:
Capex_a_HEX_USD = 0.0
Opex_fixed_HEX_USD = 0.0
Capex_HEX_USD = 0.0
return Capex_a_HEX_USD, Opex_fixed_HEX_USD, Capex_HEX_USD
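# Illustrative sketch of the cost curve above with made-up coefficients (the
# real a..e values come from the conversion-systems database): with a=0, b=100,
# c=0.5, d=0, e=0 and Q_design_W=10000, the log term vanishes and
# InvC = 100 * 10000 ** 0.5 = 10000 CHF before annualization and O&M.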
def calc_Cinv_HEX_hisaka(network_info):
"""
Calculates costs of all substation heat exchangers in a network.
Used in thermal_network_optimization.
"""
## read in cost values from database
HEX_prices = pd.read_excel(network_info.locator.get_database_conversion_systems(),
sheet_name="HEX", index_col=0)
a = HEX_prices['a']['District substation heat exchanger']
b = HEX_prices['b']['District substation heat exchanger']
c = HEX_prices['c']['District substation heat exchanger']
d = HEX_prices['d']['District substation heat exchanger']
e = HEX_prices['e']['District substation heat exchanger']
Inv_IR = HEX_prices['IR_%']['District substation heat exchanger']
Inv_LT = HEX_prices['LT_yr']['District substation heat exchanger']
Inv_OM = HEX_prices['O&M_%']['District substation heat exchanger'] / 100
## list node id of all substations
# read in nodes list
all_nodes = pd.read_csv(network_info.locator.get_thermal_network_node_types_csv_file(network_info.network_type,
network_info.network_name))
Capex_a = 0.0
Opex_a_fixed = 0.0
substation_node_id_list = []
# add buildings to node id list
for building in network_info.building_names:
# check if building is connected to network
if building not in network_info.building_names[network_info.disconnected_buildings_index]:
# add HEX cost
node_id = int(np.where(all_nodes['Building'] == building)[0])
substation_node_id_list.append(all_nodes['Name'][node_id])
# add plants to node id list
plant_id_list = np.where(all_nodes['Type'] == 'Plant')[0]
# find plant nodes
for plant_id in plant_id_list:
substation_node_id_list.append('NODE' + str(plant_id))
## calculate costs of hex at substations
for node_id in substation_node_id_list:
# read in node mass flows
node_flows = pd.read_csv(
network_info.locator.get_nominal_node_mass_flow_csv_file(network_info.network_type, network_info.network_name))
# find design condition node mcp
node_flow = max(node_flows[node_id])
if node_flow > 0:
# if the Q_design is below the lowest capacity available for the technology, then it is replaced by the least
# capacity for the corresponding technology from the database # TODO: add minimum capacity to cost function
# Split into several HEXs if flows are too high
if node_flow <= MAX_NODE_FLOW:
mcp_sub = node_flow * HEAT_CAPACITY_OF_WATER_JPERKGK
Capex_substation_hex = a + b * mcp_sub ** c + d * np.log(mcp_sub) + e * mcp_sub * np.log(mcp_sub)
else:
# we need to split into several HEXs
Capex_substation_hex = 0
number_of_HEXs = int(ceil(node_flow / MAX_NODE_FLOW))
nodeflow_nom = node_flow / number_of_HEXs
mcp_sub = nodeflow_nom * HEAT_CAPACITY_OF_WATER_JPERKGK
for i in range(number_of_HEXs):
Capex_substation_hex = Capex_substation_hex + (a + b * mcp_sub ** c + d * np.log(mcp_sub) + e * mcp_sub * np.log(mcp_sub))
Capex_a_substation_hex = calc_capex_annualized(Capex_substation_hex, Inv_IR, Inv_LT)
Opex_fixed_substation_hex = Capex_substation_hex * Inv_OM
# aggregate all substation costs in a network
Capex_a = Capex_a + Capex_a_substation_hex
Opex_a_fixed = Opex_a_fixed + Opex_fixed_substation_hex
return Capex_a, Opex_a_fixed
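# Illustrative note on the splitting logic above: if a node's design flow were
# 2.5 * MAX_NODE_FLOW, ceil(2.5) = 3 identical heat exchangers are costed,
# each sized for node_flow / 3.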
| mit |
vantares/trading-with-python | lib/bats.py | 78 | 3458 | #-------------------------------------------------------------------------------
# Name: BATS
# Purpose: get data from BATS exchange
#
# Author: jev
#
# Created: 17/08/2013
# Copyright: (c) Jev Kuznetsov 2013
# Licence: BSD
#-------------------------------------------------------------------------------
import urllib
import re
import pandas as pd
import datetime as dt
import zipfile
import StringIO
from extra import ProgressBar
import os
import yahooFinance as yf
from string import Template
import numpy as np
def fileName2date( fName):
'''convert filename to date'''
name = os.path.splitext(fName)[0]
m = re.findall(r'\d+', name)[0]
return dt.datetime.strptime(m,'%Y%m%d').date()
def date2fileName(date):
return 'BATSshvol%s.txt.zip' % date.strftime('%Y%m%d')
def downloadUrl(date):
s = Template('http://www.batstrading.com/market_data/shortsales/$year/$month/$fName-dl?mkt=bzx')
url = s.substitute(fName=date2fileName(date), year=date.year, month='%02d' % date.month)
return url
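# Illustrative example (derived from the template above):
# downloadUrl(dt.date(2013, 8, 16)) returns
# 'http://www.batstrading.com/market_data/shortsales/2013/08/BATSshvol20130816.txt.zip-dl?mkt=bzx'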
class BATS_Data(object):
def __init__(self, dataDir):
''' create class. dataDir: directory to which files are downloaded '''
self.dataDir = dataDir
self.shortRatio = None
self._checkDates()
def _checkDates(self):
''' update list of available dataset dates'''
self.dates = []
for fName in os.listdir(self.dataDir):
self.dates.append(fileName2date(fName))
def _missingDates(self):
''' check for missing dates based on spy data'''
print 'Getting yahoo data to determine business dates... ',
spy = yf.getHistoricData('SPY',sDate = (2010,1,1))
busDates = [d.date() for d in spy.index ]
print 'Date range: ', busDates[0] ,'-', busDates[-1]
missing = []
for d in busDates:
if d not in self.dates:
missing.append(d)
return missing
def updateDb(self):
print 'Updating database'
missing = self._missingDates()
for i, date in enumerate(missing):
source = downloadUrl(date)
dest = os.path.join(self.dataDir,date2fileName(date))
if not os.path.exists(dest):
print 'Downloading [%i/%i]' %(i,len(missing)), source
urllib.urlretrieve(source, dest)
else:
print 'x',
print 'Update done.'
self._checkDates()
def loadDate(self,date):
fName = os.path.join(self.dataDir, date2fileName(date))
zipped = zipfile.ZipFile(fName) # open zip file
lines = zipped.read(zipped.namelist()[0]) # read the first file in the archive into a string
buf = StringIO.StringIO(lines) # create buffer
df = pd.read_csv(buf,sep='|',index_col=1,parse_dates=False,dtype={'Date':object,'Short Volume':np.float32,'Total Volume':np.float32})
s = df['Short Volume']/df['Total Volume']
s.name = dt.datetime.strptime(df['Date'][-1],'%Y%m%d')
return s
def loadData(self):
''' load data from zip files '''
data = []
pb = ProgressBar(len(self.dates)-1)
for idx, date in enumerate(self.dates):
data.append(self.loadDate(date))
pb.animate(idx)
self.shortRatio = pd.DataFrame(data)
return self.shortRatio
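# Illustrative usage sketch (the data directory below is hypothetical):
# db = BATS_Data('D:/data/bats') # downloaded files are stored in this folder
# db.updateDb() # fetch any missing daily zip files
# ratio = db.loadData() # DataFrame of short volume / total volume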
| bsd-3-clause |
idaks/PW-explorer | PW_explorer/Input_Parsers/Clingo_Parser/clingo_parser.py | 1 | 6566 | from antlr4 import *
from .Antlr_Files.ClingoLexer import ClingoLexer
from .Antlr_Files.ClingoParser import ClingoParser
from .Antlr_Files.ClingoListener import ClingoListener
from ...helper import isfloat, PossibleWorld, Relation
import pandas as pd
import numpy as np
from antlr4.tree.Trees import Trees
def rearrangePWSandRLS(relations, pws):
"""
Sort the possible worlds and relations by their ids
:return: None
"""
relations.sort(key=lambda x: x.r_id)
pws.sort(key=lambda x: x.pw_id)
def loadIntoPandas(relations, pws, dfs):
"""
Populate the Pandas DF, one for each relation
:return: None
"""
for n, rl in enumerate(relations):
cls = ['pw']
cls.extend([str('x' + str(i)) for i in range(1, rl.arity + 1)])
rws = [] # could convert into numpy if sure it's all float/int
for m, pw in enumerate(pws):
if rl.relation_name in pw.rls:
rl_data_pw = []
for rl_data in pw.rls[rl.relation_name]:
rl_data_pw.append(rl_data.copy())
rl_data_pw[-1].insert(0, pw.pw_id)
rws.extend(rl_data_pw)
df = pd.DataFrame(rws, columns=cls)
dfs[rl.relation_name] = df
######################################################################################
######################################################################################
class AntlrClingoListener(ClingoListener):
def __init__(self):
self.pws = []
self.relations = []
self.expected_pws = 0
self.curr_pw = None
self.curr_pw_id = 1
self.curr_fact = None
self.curr_fact_data = None
self.curr_fact_depth = 0
self.n_facts = 0
self.dfs = {}
self.silent = False
def enterClingoOutput(self, ctx):
if ctx.OPTIMUM_FOUND() is not None:
if ctx.OPTIMUM_FOUND().getText() == 'UNSATISFIABLE':
if not self.silent:
print("The problem is unsatisfiable")
# print("enterClingoOutput")
def enterPw(self, ctx):
self.curr_pw = PossibleWorld(self.curr_pw_id)
# assert curr_pw.pw_id == int(ctx.TEXT(0).getText())
if ctx.TEXT(1) is not None:
self.curr_pw.pw_soln = float(ctx.TEXT(1).getText()) if isfloat(ctx.TEXT(1).getText()) else ctx.TEXT(1).getText()
def enterFact(self, ctx):
self.curr_fact_depth += 1
rel_name = ctx.TEXT().getText()
if self.curr_fact_depth == 1:
self.curr_fact = Relation(rel_name)
# Set defaults in case this is a 0-arity relation
self.curr_fact_data = []
else:
tmp_ptr = self.curr_fact_data
for _ in range(self.curr_fact_depth-2):
tmp_ptr = tmp_ptr[-1]
tmp_ptr.append([rel_name])
def enterFact_text(self, ctx:ClingoParser.Fact_textContext):
tmp_ptr = self.curr_fact_data
for _ in range(self.curr_fact_depth - 1):
tmp_ptr = tmp_ptr[-1]
tmp_ptr.append(ctx.TEXT().getText())
def exitFact(self, ctx):
if self.curr_fact_depth == 1:
self.curr_fact.arity = len(self.curr_fact_data)
rl_name_mod = str(self.curr_fact.relation_name + '_' + str(self.curr_fact.arity))
self.curr_fact.relation_name = rl_name_mod
foundMatch = False
for rl in self.relations:
if self.curr_fact.relation_name == rl.relation_name and self.curr_fact.arity == rl.arity:
self.curr_fact.r_id = rl.r_id
foundMatch = True
break
if not foundMatch:
newRl = Relation(self.curr_fact.relation_name)
newRl.arity = self.curr_fact.arity
newRl.r_id = self.n_facts
self.n_facts += 1
self.relations.append(newRl)
self.curr_fact.r_id = newRl.r_id
self.curr_pw.add_relation(self.curr_fact.relation_name, self.curr_fact_data)
self.curr_fact = None # could introduce bugs if passed by pointer in the upper statement, so be careful, use copy() if needed
self.curr_fact_data = None
self.curr_fact_depth -= 1
def exitPw(self, ctx):
self.pws.append(self.curr_pw) # again be wary, else use .copy()
self.curr_pw = None
self.curr_pw_id += 1
def enterOptimum(self, ctx):
optimum_found = ctx.TEXT().getText()
if optimum_found == 'yes':
if not self.silent:
print('Optimum Solution was found')
elif optimum_found == 'no':
if not self.silent:
print('Optimum Solution was not found')
else:
if not self.silent:
print('Unexpected Output:', optimum_found)
def enterOptimization(self, ctx):
opt_soln = ctx.TEXT().getText()
if not self.silent:
print('Optimized Solution is', opt_soln)
def enterModels(self, ctx):
num_models = ctx.TEXT().getText()
num_models = int(num_models)
if not self.silent:
print("Number of Models:", num_models)
self.expected_pws = num_models
def exitClingoOutput(self, ctx):
# loading into pandas DF
rearrangePWSandRLS(self.relations, self.pws)
loadIntoPandas(self.relations, self.pws, self.dfs)
######################################################################################
def __parse_clingo_output__(input_stream, silent=False, print_parse_tree=False):
lexer = ClingoLexer(input_stream)
# use this line to take input from the cmd line
# lexer = ClingoLexer(StdinStream())
ct_stream = CommonTokenStream(lexer)
parser = ClingoParser(ct_stream)
tree = parser.clingoOutput()
if print_parse_tree:
print(Trees.toStringTree(tree, None, parser))
pw_analyzer = AntlrClingoListener()
pw_analyzer.silent = silent
walker = ParseTreeWalker()
walker.walk(pw_analyzer, tree)
return pw_analyzer.dfs, pw_analyzer.relations, pw_analyzer.pws
def parse_clingo_output_from_file(fname, silent=False, print_parse_tree=False):
input_stream = FileStream(fname)
return __parse_clingo_output__(input_stream, silent, print_parse_tree)
def parse_clingo_output_from_string(clingo_output_string, silent=False, print_parse_tree=False):
input_stream = InputStream(clingo_output_string)
return __parse_clingo_output__(input_stream, silent, print_parse_tree)
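# Illustrative usage sketch: `clingo_text` stands for captured clingo output and
# `edge` for a hypothetical 2-ary relation appearing in the answer sets.
# dfs, relations, pws = parse_clingo_output_from_string(clingo_text, silent=True)
# dfs['edge_2'] # pandas DataFrame with one row per (possible world, fact)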
| apache-2.0 |
prheenan/Research | Personal/EventDetection/Docs/ToyGraphs/dna_tables/main_dna_sequences.py | 1 | 2026 | # force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
def read_seq(input_file):
with open(input_file) as f:
# first line is comments
f.readline()
# second line is sequence
seq = f.readline()
return seq
def format_seq(seq,
terminal_biotin="B",
terminal_DBCO="D",
internal_biotin="T"):
seq = seq.replace("5dbcoteg",terminal_DBCO)
seq = seq.replace("3bioteg",terminal_biotin)
seq = seq.replace("dbco",terminal_DBCO)
seq = seq.replace("5biotin",terminal_biotin)
seq = seq.replace("ibiodt",internal_biotin)
seq = seq.replace("/","")
return seq
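# Illustrative example (derived from the replacements above):
# format_seq("/5dbcoteg/acgt/3bioteg/") returns "DacgtB".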
class seqs:
def __init__(self,fwd_primer,rev_primer,hairpin):
sanit = lambda x: x.replace(" ","").lower()
self.fwd = sanit(fwd_primer)
self.rev = sanit(rev_primer)
self.hairpin = sanit(hairpin)
def fwd_rev_hairpin_formatted(self):
return format_seq(self.fwd),\
format_seq(self.rev),\
format_seq(self.hairpin)
def get_sequences(base_dir):
return seqs(read_seq(base_dir + "1607F_DBCO"),
read_seq(base_dir + "3520R_4xBio"),
read_seq(base_dir + "68nt_hairpin"))
def get_latex_table(sequences):
formatted_seqs = sequences.fwd_rev_hairpin_formatted()
line_end = " \\\\ \e \n"
to_ret = "Name & Sequence" + line_end
names = ["Forward primer for 650nm DNA",
"Reverse primer for 650nm DNA",
"68nt hairpin"]
rows = ["{:s} & {:s}".format(name,seq)
for name,seq in zip(names,formatted_seqs)]
to_ret += line_end.join(rows) + line_end
return to_ret
def run(base_dir="./"):
"""
"""
sequences = get_sequences(base_dir=base_dir)
table = get_latex_table(sequences)
print(table)
if __name__ == "__main__":
run()
| gpl-3.0 |
lweasel/piquant | test/test_classifiers.py | 2 | 4224 | import pandas as pd
import piquant.classifiers as classifiers
def _get_test_classifier(
column_name="dummy", value_extractor=lambda x: x,
grouped_stats=True, distribution_plot_range=None):
return classifiers._Classifier(
column_name, value_extractor, grouped_stats, distribution_plot_range)
def _get_test_levels_classifier(
column_name="dummy", value_extractor=lambda x: x,
levels=[10, 20, 30], closed=True):
return classifiers._LevelsClassifier(
column_name, value_extractor, levels, closed)
def test_get_classifiers_returns_classifiers_instances():
clsfrs = classifiers.get_classifiers()
assert all([isinstance(c, classifiers._Classifier) for c in clsfrs])
def test_classifier_get_column_name_returns_correct_name():
name = "column name"
c = _get_test_classifier(column_name=name)
assert c.get_column_name() == name
def test_classifier_get_value_return_correct_value():
col_name = "column name"
col_value = "column value"
df = pd.DataFrame.from_dict([{col_name: col_value, "dummy": "dummy"}])
c = _get_test_classifier(value_extractor=lambda x: x[col_name])
assert c.get_value(df.ix[0]) == col_value
def test_classifier_get_classification_value_returns_correct_value():
col_name = "column name"
col_value = "column value"
df = pd.DataFrame.from_dict([{col_name: col_value, "dummy": "dummy"}])
c = _get_test_classifier(value_extractor=lambda x: x[col_name])
assert c.get_classification_value(df.ix[0]) == col_value
def test_classifier_produces_grouped_stats_returns_correct_value():
gs = False
c = _get_test_classifier(grouped_stats=gs)
assert c.produces_grouped_stats() == gs
def test_classifier_produces_distribution_plots_returns_correct_value():
gs = False
c = _get_test_classifier(grouped_stats=gs)
assert c.produces_distribution_plots() != gs
def test_classifier_get_distribution_plot_range_returns_correct_value():
dpr = (10, 30)
c = _get_test_classifier(distribution_plot_range=dpr)
assert c.get_distribution_plot_range() == dpr
def test_classifier_get_value_labels_returns_correct_labels():
num_labels = 5
c = _get_test_classifier()
assert c.get_value_labels(num_labels) == range(1, num_labels + 1)
def test_classifier_get_stats_file_suffix_returns_correct_suffix_for_grouped_stats():
c = _get_test_classifier(column_name="column name", grouped_stats=True)
assert c.get_stats_file_suffix() == "_stats_by_column_name"
def test_classifier_get_stats_file_suffix_returns_correct_suffix_for_non_grouped_stats_and_ascending_order():
c = _get_test_classifier(column_name="column name", grouped_stats=False)
assert c.get_stats_file_suffix(True) == \
"_distribution_stats_asc_by_column_name"
def test_classifier_get_stats_file_suffix_returns_correct_suffix_for_non_grouped_stats_and_descending_order():
c = _get_test_classifier(column_name="column name", grouped_stats=False)
assert c.get_stats_file_suffix(False) == \
"_distribution_stats_desc_by_column_name"
def test_levels_classifier_get_classification_value_returns_correct_value():
col_name = "column name"
df = pd.DataFrame.from_dict([{col_name: 25, "dummy": "dummy"}])
c = _get_test_levels_classifier(value_extractor=lambda x: x[col_name])
assert c.get_classification_value(df.ix[0]) == 2
def test_levels_classifier_get_value_labels_returns_correct_labels_for_closed_classifier():
levels = [10, 20, 30, 40]
c = _get_test_levels_classifier(levels=levels)
assert c.get_value_labels(len(levels)) == \
["<= 10", "<= 20", "<= 30", "<= 40"]
assert c.get_value_labels(len(levels) - 1) == \
["<= 10", "<= 20", "<= 30"]
def test_levels_classifier_get_value_labels_returns_correct_labels_for_open_classifier():
levels = [10, 20, 30, 40]
c = _get_test_levels_classifier(levels=levels, closed=False)
assert c.get_value_labels(len(levels) + 1) == \
["<= 10", "<= 20", "<= 30", "<= 40", "> 40"]
assert c.get_value_labels(len(levels)) == \
["<= 10", "<= 20", "<= 30", "<= 40"]
assert c.get_value_labels(len(levels) - 1) == \
["<= 10", "<= 20", "<= 30"]
| mit |
xavierwu/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
xapple/plumbing | testing/database/access_db/test_access_db.py | 1 | 1153 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Typically you would run this file from a command line like this:
ipython.exe -i -- /deploy/plumbing/tests/database/access_db/test_access_db.py
"""
# Built-in module #
import inspect, os
# Internal modules #
from plumbing.databases.access_database import AccessDatabase
from autopaths.file_path import FilePath
# Third party modules #
import pandas
# Constants #
file_name = inspect.getframeinfo(inspect.currentframe()).filename
this_dir = os.path.dirname(os.path.abspath(file_name)) + '/'
# Never modify the original #
orig_db = FilePath(this_dir + 'orig.mdb')
testing_db = FilePath(this_dir + 'testing.mdb')
orig_db.copy(testing_db)
# The database #
db = AccessDatabase(testing_db)
# Test #
print(db.tables)
print(db['tblClassifierSets'])
# Create df #
df = pandas.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']},
index=[0, 1, 2, 3])
# Insert table #
db.insert_df('dataframe', df)
# Close #
db.close() | mit |
felipemontefuscolo/bitme | common/quote.py | 1 | 1314 | from common import Symbol
import pandas as pd
class Quote:
def __init__(self,
symbol: Symbol,
timestamp: pd.Timestamp = None,
bid_size=None,
bid_price=None,
ask_size=None,
ask_price=None):
self.timestamp = timestamp
self.symbol = symbol
self.bid_size = bid_size
self.bid_price = bid_price
self.ask_size = ask_size
self.ask_price = ask_price
@staticmethod
def from_raw(raw: dict):
return Quote(symbol=Symbol[raw['symbol']],
timestamp=pd.Timestamp(raw['timestamp']),
bid_size=raw['bidSize'],
bid_price=raw['bidPrice'],
ask_size=raw['askSize'],
ask_price=raw['askPrice'])
def update_from_bitmex(self, raw: dict):
assert self.symbol.name == raw['symbol']
self.timestamp = pd.Timestamp(raw['timestamp'])
self.bid_size = raw['bidSize']
self.bid_price = raw['bidPrice']
self.ask_size = raw['askSize']
self.ask_price = raw['askPrice']
return self
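# Worked example for w_mid below (illustrative numbers): with bid 100 @ 9999.5
# and ask 200 @ 10000.0, the size-weighted mid is
# (100 * 9999.5 + 200 * 10000.0) / 300, roughly 9999.83.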
def w_mid(self):
return (self.bid_size * self.bid_price + self.ask_size * self.ask_price) / (self.bid_size + self.ask_size) | mpl-2.0 |
ioam/param | tests/API1/testpandas.py | 2 | 7360 | """
Test Parameters based on pandas
"""
import unittest
import os
import param
from . import API1TestCase
try:
import pandas
except ImportError:
if os.getenv('PARAM_TEST_PANDAS','0') == '1':
raise ImportError("PARAM_TEST_PANDAS=1 but pandas not available.")
else:
raise unittest.SkipTest("pandas not available")
class TestDataFrame(API1TestCase):
def test_dataframe_positional_argument(self):
valid_df = pandas.DataFrame({'a':[1,2], 'b':[2,3], 'c':[4,5]},
columns=['b', 'a', 'c'])
class Test(param.Parameterized):
df = param.DataFrame(valid_df)
def test_empty_dataframe_param_invalid_set(self):
empty = pandas.DataFrame()
class Test(param.Parameterized):
df = param.DataFrame(default=empty)
test = Test()
exception = "Parameter 'df' value must be an instance of DataFrame, not '3'"
with self.assertRaisesRegexp(ValueError, exception):
test.df = 3
def test_dataframe_unordered_column_set_valid(self):
valid_df = pandas.DataFrame({'a':[1,2], 'b':[2,3], 'c':[4,5]}, columns=['b', 'a', 'c'])
class Test(param.Parameterized):
df = param.DataFrame(default=valid_df, columns={'a', 'b'})
def test_dataframe_unordered_column_set_invalid(self):
valid_df = pandas.DataFrame({'a':[1,2], 'b':[2,3], 'd':[4,5]}, columns=['b', 'a', 'd'])
invalid_df = pandas.DataFrame({'a':[1,2], 'b':[2,3], 'c':[4,5]}, columns=['b', 'a', 'c'])
class Test(param.Parameterized):
df = param.DataFrame(default=valid_df, columns={'a', 'd'})
test = Test()
self.assertEquals(test.param.params('df').ordered, False)
exception = "Provided DataFrame columns \['b', 'a', 'c'\] does not contain required columns \['a', 'd'\]"
with self.assertRaisesRegexp(ValueError, exception):
test.df = invalid_df
def test_dataframe_ordered_column_list_valid(self):
valid_df = pandas.DataFrame({'a':[1,2], 'b':[2,3], 'c':[4,5]}, columns=['b', 'a', 'c'])
class Test(param.Parameterized):
test = param.DataFrame(default=valid_df, columns=['b', 'a', 'c'])
def test_dataframe_ordered_column_list_invalid(self):
valid_df = pandas.DataFrame({'a':[1,2], 'b':[2,3], 'd':[4,5]}, columns=['b', 'a', 'd'])
invalid_df = pandas.DataFrame({'a':[1,2], 'b':[2,3], 'c':[4,5]}, columns=['a', 'b', 'd'])
class Test(param.Parameterized):
df = param.DataFrame(default=valid_df, columns=['b', 'a', 'd'])
test = Test()
self.assertEquals(test.param.params('df').ordered, True)
exception = "Provided DataFrame columns \['a', 'b', 'd'\] must exactly match \['b', 'a', 'd'\]"
with self.assertRaisesRegexp(ValueError, exception):
test.df = invalid_df
def test_dataframe_unordered_column_number_valid_df(self):
valid_df = pandas.DataFrame({'a':[1,2], 'b':[2,3], 'c':[4,5]}, columns=['b', 'a', 'c'])
class Test(param.Parameterized):
df = param.DataFrame(default=valid_df, columns=3)
def test_dataframe_unordered_column_number_invalid(self):
valid_df = pandas.DataFrame({'a':[1,2], 'b':[2,3], 'c':[4,5]}, columns=['b', 'a', 'c'])
invalid_df = pandas.DataFrame({'a':[1,2], 'b':[2,3]}, columns=['b', 'a'])
class Test(param.Parameterized):
df = param.DataFrame(default=valid_df, columns=3)
test = Test()
self.assertEquals(test.param.params('df').ordered, None)
exception = "Column length 2 does not match declared bounds of 3"
with self.assertRaisesRegexp(ValueError, exception):
test.df = invalid_df
def test_dataframe_unordered_column_tuple_valid(self):
valid_df = pandas.DataFrame({'a':[1,2], 'b':[2,3], 'c':[4,5]}, columns=['b', 'a', 'c'])
class Test(param.Parameterized):
df = param.DataFrame(default=valid_df, columns=(None,3))
def test_dataframe_unordered_column_tuple_invalid(self):
invalid_df = pandas.DataFrame({'a':[1,2], 'b':[2,3], 'c':[4,5]}, columns=['b', 'a', 'c'])
exception = "Columns length 3 does not match declared bounds of \(None, 2\)"
with self.assertRaisesRegexp(ValueError, exception):
class Test(param.Parameterized):
df = param.DataFrame(default=invalid_df, columns=(None,2))
def test_dataframe_row_number_valid_df(self):
valid_df = pandas.DataFrame({'a':[1,2], 'b':[2,3], 'c':[4,5]}, columns=['b', 'a', 'c'])
class Test(param.Parameterized):
df = param.DataFrame(default=valid_df, rows=2)
def test_dataframe_row_number_invalid(self):
valid_df = pandas.DataFrame({'a':[1,2], 'b':[2,3]}, columns=['b', 'a'])
invalid_df = pandas.DataFrame({'a':[1,2,4], 'b':[2,3,4]}, columns=['b', 'a'])
class Test(param.Parameterized):
df = param.DataFrame(default=valid_df, rows=2)
test = Test()
exception = "Row length 3 does not match declared bounds of 2"
with self.assertRaisesRegexp(ValueError, exception):
test.df = invalid_df
def test_dataframe_unordered_row_tuple_valid(self):
valid_df = pandas.DataFrame({'a':[1,2], 'b':[2,3], 'c':[4,5]}, columns=['b', 'a', 'c'])
class Test(param.Parameterized):
df = param.DataFrame(default=valid_df, rows=(None,3))
def test_dataframe_unordered_row_tuple_invalid(self):
invalid_df = pandas.DataFrame({'a':[1,2], 'b':[2,3], 'c':[4,5]}, columns=['b', 'a', 'c'])
exception = "Row length 2 does not match declared bounds of \(5, 7\)"
with self.assertRaisesRegexp(ValueError, exception):
class Test(param.Parameterized):
df = param.DataFrame(default=invalid_df, rows=(5,7))
class TestSeries(API1TestCase):
def test_series_positional_argument(self):
valid_series = pandas.Series([1,2])
class Test(param.Parameterized):
series = param.Series(valid_series, rows=2)
def test_series_row_number_valid(self):
valid_series = pandas.Series([1,2])
class Test(param.Parameterized):
series = param.Series(default=valid_series, rows=2)
def test_series_row_number_invalid(self):
valid_series = pandas.Series([1,2])
invalid_series = pandas.Series([1,2,3])
class Test(param.Parameterized):
series = param.Series(default=valid_series, rows=2)
test = Test()
exception = "Row length 3 does not match declared bounds of 2"
with self.assertRaisesRegexp(ValueError, exception):
test.series = invalid_series
def test_series_unordered_row_tuple_valid(self):
valid_series = pandas.Series([1,2,3])
class Test(param.Parameterized):
series = param.Series(default=valid_series, rows=(None,3))
def test_series_unordered_row_tuple_invalid(self):
invalid_series = pandas.Series([1,2])
exception = "Row length 2 does not match declared bounds of \(5, 7\)"
with self.assertRaisesRegexp(ValueError, exception):
class Test(param.Parameterized):
series = param.Series(default=invalid_series, rows=(5,7))
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
Frankkkkk/arctic | arctic/serialization/numpy_arrays.py | 1 | 6258 | import logging
import numpy as np
import numpy.ma as ma
import pandas as pd
from bson import Binary, SON
from .._compression import compress, decompress, compress_array
from ._serializer import Serializer
DATA = 'd'
MASK = 'm'
TYPE = 't'
DTYPE = 'dt'
COLUMNS = 'c'
INDEX = 'i'
METADATA = 'md'
LENGTHS = 'ln'
class FrameConverter(object):
"""
Converts a Pandas Dataframe to and from PyMongo SON representation:
{
METADATA: {
COLUMNS: [col1, col2, ...] list of str
MASKS: {col1: mask, col2: mask, ...} dict of str: Binary
INDEX: [idx1, idx2, ...] list of str
TYPE: 'series' or 'dataframe'
LENGTHS: {col1: len, col2: len, ...} dict of str: int
}
DATA: BINARY(....) Compressed columns concatenated together
}
"""
def _convert_types(self, a):
"""
Converts object arrays of strings to numpy string arrays
"""
# No conversion for scalar type
if a.dtype != 'object':
return a, None
# We can't infer the type of an empty array, so just
# assume strings
if len(a) == 0:
return a.astype('U1'), None
# Compute a mask of missing values. Replace NaNs and Nones with
# empty strings so that type inference has a chance.
mask = pd.isnull(a)
if mask.sum() > 0:
a = a.copy()
np.putmask(a, mask, '')
else:
mask = None
if pd.lib.infer_dtype(a) == 'mixed':
a = np.array([s.encode('ascii') for s in a])
a = a.astype('O')
type_ = pd.lib.infer_dtype(a)
if type_ in ['unicode', 'string']:
max_len = pd.lib.max_len_string_array(a)
return a.astype('U{:d}'.format(max_len)), mask
else:
raise ValueError('Cannot store arrays with {} dtype'.format(type_))
def docify(self, df):
"""
Convert a Pandas DataFrame to SON.
Parameters
----------
df: DataFrame
The Pandas DataFrame to encode
"""
dtypes = {}
masks = {}
lengths = {}
columns = []
data = Binary(b'')
start = 0
arrays = []
for c in df:
try:
columns.append(str(c))
arr, mask = self._convert_types(df[c].values)
dtypes[str(c)] = arr.dtype.str
if mask is not None:
masks[str(c)] = Binary(compress(mask.tostring()))
arrays.append(arr.tostring())
except Exception as e:
typ = pd.lib.infer_dtype(df[c])
msg = "Column '{}' type is {}".format(str(c), typ)
logging.info(msg)
raise e
arrays = compress_array(arrays)
for index, c in enumerate(df):
d = Binary(arrays[index])
lengths[str(c)] = (start, start + len(d) - 1)
start += len(d)
data += d
doc = SON({DATA: data, METADATA: {}})
doc[METADATA] = {COLUMNS: columns,
MASK: masks,
LENGTHS: lengths,
DTYPE: dtypes
}
return doc
def objify(self, doc, columns=None):
"""
Decode a Pymongo SON object into an Pandas DataFrame
"""
cols = columns or doc[METADATA][COLUMNS]
data = {}
for col in cols:
d = decompress(doc[DATA][doc[METADATA][LENGTHS][col][0]: doc[METADATA][LENGTHS][col][1] + 1])
d = np.fromstring(d, doc[METADATA][DTYPE][col])
if MASK in doc[METADATA] and col in doc[METADATA][MASK]:
mask_data = decompress(doc[METADATA][MASK][col])
mask = np.fromstring(mask_data, 'bool')
d = ma.masked_array(d, mask)
data[col] = d
return pd.DataFrame(data, columns=cols)[cols]
class FrametoArraySerializer(Serializer):
TYPE = 'FrameToArray'
def __init__(self):
self.converter = FrameConverter()
def serialize(self, df):
if isinstance(df, pd.Series):
dtype = 'series'
df = df.to_frame()
else:
dtype = 'dataframe'
if (len(df.index.names) > 1 and None in df.index.names) or None in list(df.columns.values):
raise Exception("All columns and indexes must be named")
if df.index.names != [None]:
index = df.index.names
df = df.reset_index()
ret = self.converter.docify(df)
ret[METADATA][INDEX] = index
ret[METADATA][TYPE] = dtype
return ret
ret = self.converter.docify(df)
ret[METADATA][TYPE] = dtype
return ret
def deserialize(self, data, columns=None):
'''
Deserializes SON to a DataFrame
Parameters
----------
data: SON data
columns: None, or list of strings
optionally you can deserialize a subset of the data in the SON. Index
columns are ALWAYS deserialized, and should not be specified
Returns
-------
pandas dataframe or series
'''
if data == []:
return pd.DataFrame()
meta = data[0][METADATA] if isinstance(data, list) else data[METADATA]
index = INDEX in meta
if columns:
if index:
columns.extend(meta[INDEX])
if len(columns) > len(set(columns)):
raise Exception("Duplicate columns specified, cannot de-serialize")
if not isinstance(data, list):
df = self.converter.objify(data, columns)
else:
df = pd.concat([self.converter.objify(d, columns) for d in data], ignore_index=not index)
if index:
df = df.set_index(meta[INDEX])
if meta[TYPE] == 'series':
return df[df.columns[0]]
return df
def combine(self, a, b):
if a.index.names != [None]:
return pd.concat([a, b]).sort_index()
return pd.concat([a, b])
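# Illustrative round trip (a sketch, not part of the library's tests):
# s = FrametoArraySerializer()
# doc = s.serialize(pd.DataFrame({'a': [1.0, 2.0], 'b': [3.0, 4.0]}))
# df = s.deserialize(doc) # recovers the frame with the original column order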
| lgpl-2.1 |
CallaJun/hackprince | indico/skimage/io/manage_plugins.py | 7 | 10329 | """Handle image reading, writing and plotting plugins.
To improve performance, plugins are only loaded as needed. As a result, there
can be multiple states for a given plugin:
available: Defined in an *ini file located in `skimage.io._plugins`.
See also `skimage.io.available_plugins`.
partial definition: Specified in an *ini file, but not defined in the
corresponding plugin module. This will raise an error when loaded.
available but not on this system: Defined in `skimage.io._plugins`, but
a dependent library (e.g. Qt, PIL) is not available on your system.
This will raise an error when loaded.
loaded: The real availability is determined when it's explicitly loaded,
either because it's one of the default plugins, or because it's
loaded explicitly by the user.
"""
try:
from configparser import ConfigParser # Python 3
except ImportError:
from ConfigParser import ConfigParser # Python 2
import os.path
from glob import glob
from .collection import imread_collection_wrapper
__all__ = ['use_plugin', 'call_plugin', 'plugin_info', 'plugin_order',
'reset_plugins', 'find_available_plugins', 'available_plugins']
# The plugin store will save a list of *loaded* io functions for each io type
# (e.g. 'imread', 'imsave', etc.). Plugins are loaded as requested.
plugin_store = None
# Dictionary mapping plugin names to a list of functions they provide.
plugin_provides = {}
# The module names for the plugins in `skimage.io._plugins`.
plugin_module_name = {}
# Meta-data about plugins provided by *.ini files.
plugin_meta_data = {}
# For each plugin type, default to the first available plugin as defined by
# the following preferences.
preferred_plugins = {
# Default plugins for all types (overridden by specific types below).
'all': ['pil', 'matplotlib', 'qt', 'freeimage'],
'imshow': ['matplotlib']
}
def _clear_plugins():
"""Clear the plugin state to the default, i.e., where no plugins are loaded
"""
global plugin_store
plugin_store = {'imread': [],
'imsave': [],
'imshow': [],
'imread_collection': [],
'_app_show': []}
_clear_plugins()
def _load_preferred_plugins():
# Load preferred plugin for each io function.
io_types = ['imsave', 'imshow', 'imread_collection', 'imread']
for p_type in io_types:
_set_plugin(p_type, preferred_plugins['all'])
plugin_types = (p for p in preferred_plugins.keys() if p != 'all')
for p_type in plugin_types:
_set_plugin(p_type, preferred_plugins[p_type])
def _set_plugin(plugin_type, plugin_list):
for plugin in plugin_list:
if plugin not in available_plugins:
continue
try:
use_plugin(plugin, kind=plugin_type)
break
except (ImportError, RuntimeError, OSError):
pass
def reset_plugins():
_clear_plugins()
_load_preferred_plugins()
def _parse_config_file(filename):
"""Return plugin name and meta-data dict from plugin config file."""
parser = ConfigParser()
parser.read(filename)
name = parser.sections()[0]
meta_data = {}
for opt in parser.options(name):
meta_data[opt] = parser.get(name, opt)
return name, meta_data
def _scan_plugins():
"""Scan the plugins directory for .ini files and parse them
to gather plugin meta-data.
"""
pd = os.path.dirname(__file__)
config_files = glob(os.path.join(pd, '_plugins', '*.ini'))
for filename in config_files:
name, meta_data = _parse_config_file(filename)
plugin_meta_data[name] = meta_data
provides = [s.strip() for s in meta_data['provides'].split(',')]
valid_provides = [p for p in provides if p in plugin_store]
for p in provides:
if not p in plugin_store:
print("Plugin `%s` wants to provide non-existent `%s`."
" Ignoring." % (name, p))
# Add plugins that provide 'imread' as provider of 'imread_collection'.
need_to_add_collection = ('imread_collection' not in valid_provides and
'imread' in valid_provides)
if need_to_add_collection:
valid_provides.append('imread_collection')
plugin_provides[name] = valid_provides
plugin_module_name[name] = os.path.basename(filename)[:-4]
_scan_plugins()
def find_available_plugins(loaded=False):
"""List available plugins.
Parameters
----------
loaded : bool
If True, show only those plugins currently loaded. By default,
all plugins are shown.
Returns
-------
p : dict
Dictionary with plugin names as keys and exposed functions as
values.
"""
active_plugins = set()
for plugin_func in plugin_store.values():
for plugin, func in plugin_func:
active_plugins.add(plugin)
d = {}
for plugin in plugin_provides:
if not loaded or plugin in active_plugins:
d[plugin] = [f for f in plugin_provides[plugin]
if not f.startswith('_')]
return d
available_plugins = find_available_plugins()
def call_plugin(kind, *args, **kwargs):
"""Find the appropriate plugin of 'kind' and execute it.
Parameters
----------
kind : {'imshow', 'imsave', 'imread', 'imread_collection'}
Function to look up.
plugin : str, optional
Plugin to load. Defaults to None, in which case the first
matching plugin is used.
*args, **kwargs : arguments and keyword arguments
Passed to the plugin function.
"""
if not kind in plugin_store:
raise ValueError('Invalid function (%s) requested.' % kind)
plugin_funcs = plugin_store[kind]
if len(plugin_funcs) == 0:
msg = ("No suitable plugin registered for %s.\n\n"
"You may load I/O plugins with the `skimage.io.use_plugin` "
"command. A list of all available plugins can be found using "
"`skimage.io.plugins()`.")
raise RuntimeError(msg % kind)
plugin = kwargs.pop('plugin', None)
if plugin is None:
_, func = plugin_funcs[0]
else:
_load(plugin)
try:
func = [f for (p, f) in plugin_funcs if p == plugin][0]
except IndexError:
raise RuntimeError('Could not find the plugin "%s" for %s.' %
(plugin, kind))
return func(*args, **kwargs)
def use_plugin(name, kind=None):
"""Set the default plugin for a specified operation. The plugin
will be loaded if it hasn't been already.
Parameters
----------
name : str
Name of plugin.
kind : {'imsave', 'imread', 'imshow', 'imread_collection'}, optional
Set the plugin for this function. By default,
the plugin is set for all functions.
See Also
--------
available_plugins : List of available plugins
Examples
--------
To use Matplotlib as the default image reader, you would write:
>>> from skimage import io
>>> io.use_plugin('matplotlib', 'imread')
To see a list of available plugins run ``io.available_plugins``. Note that
this lists plugins that are defined, but the full list may not be usable
if your system does not have the required libraries installed.
"""
if kind is None:
kind = plugin_store.keys()
else:
if not kind in plugin_provides[name]:
raise RuntimeError("Plugin %s does not support `%s`." %
(name, kind))
if kind == 'imshow':
kind = [kind, '_app_show']
else:
kind = [kind]
_load(name)
for k in kind:
if not k in plugin_store:
raise RuntimeError("'%s' is not a known plugin function." % k)
funcs = plugin_store[k]
# Shuffle the plugins so that the requested plugin stands first
# in line
funcs = [(n, f) for (n, f) in funcs if n == name] + \
[(n, f) for (n, f) in funcs if n != name]
plugin_store[k] = funcs
def _inject_imread_collection_if_needed(module):
"""Add `imread_collection` to module if not already present."""
if not hasattr(module, 'imread_collection') and hasattr(module, 'imread'):
imread = getattr(module, 'imread')
func = imread_collection_wrapper(imread)
setattr(module, 'imread_collection', func)
def _load(plugin):
"""Load the given plugin.
Parameters
----------
plugin : str
Name of plugin to load.
See Also
--------
plugins : List of available plugins
"""
if plugin in find_available_plugins(loaded=True):
return
if not plugin in plugin_module_name:
raise ValueError("Plugin %s not found." % plugin)
else:
modname = plugin_module_name[plugin]
plugin_module = __import__('skimage.io._plugins.' + modname,
fromlist=[modname])
provides = plugin_provides[plugin]
for p in provides:
if p == 'imread_collection':
_inject_imread_collection_if_needed(plugin_module)
elif not hasattr(plugin_module, p):
print("Plugin %s does not provide %s as advertised. Ignoring." %
(plugin, p))
continue
store = plugin_store[p]
func = getattr(plugin_module, p)
if not (plugin, func) in store:
store.append((plugin, func))
def plugin_info(plugin):
"""Return plugin meta-data.
Parameters
----------
plugin : str
Name of plugin.
Returns
-------
m : dict
Meta data as specified in plugin ``.ini``.
"""
try:
return plugin_meta_data[plugin]
except KeyError:
raise ValueError('No information on plugin "%s"' % plugin)
def plugin_order():
"""Return the currently preferred plugin order.
Returns
-------
p : dict
Dictionary of preferred plugin order, with function name as key and
plugins (in order of preference) as value.
"""
p = {}
for func in plugin_store:
p[func] = [plugin_name for (plugin_name, f) in plugin_store[func]]
return p
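# Illustrative sketch (the named plugin must be importable on this system):
# >>> from skimage import io
# >>> io.use_plugin('matplotlib', 'imread')
# >>> io.plugin_order()['imread'][0]
# 'matplotlib'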
| lgpl-3.0 |
bravelittlescientist/kdd-particle-physics-ml-fall13 | src/nearest_neighbors.py | 1 | 1162 | #!/usr/bin/python2
# This is a NN classifier based on the scikit-learn documentation.
#
# http://scikit-learn.org/stable/modules/neighbors.html
import sys
from imputation import load_data
from util import shuffle_split
from metrics import suite
from sklearn.neighbors import KNeighborsClassifier
def train(Xtrain, Ytrain):
""" Use entirety of provided X, Y to predict
Default Arguments
Xtrain -- Training data
Ytrain -- Training labels
Named Arguments
--
Returns
classifier -- a k-nearest neighbors classifier fitted to Xtrain and Ytrain
"""
classifier = KNeighborsClassifier(125)
classifier.fit(Xtrain, Ytrain)
return classifier
if __name__ == "__main__":
# Let's take our training data and train a k-nearest neighbors classifier
# on a subset. Scikit-learn provides a good module for cross-
# validation.
if len(sys.argv) < 2:
print "Usage: $ python decision-tree.py /path/to/data/file/"
else:
training = sys.argv[1]
X,Y,n,f = load_data(training)
Xt, Xv, Yt, Yv = shuffle_split(X,Y)
Classifier = train(Xt, Yt)
print "KNN Accuracy"
suite(Yv, Classifier.predict(Xv))
| gpl-2.0 |
PalmDr/XRD-Data-Analysis-Toolkit | Beta1.4/DataAnalysisClass.py | 1 | 4047 | __author__ = 'j'
import os
from tkinter import *
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from Builder import *
from TreeView import *
from ConvertCSV import *
from SQ_Pilatus import *
class DataAnalysis():
def __init__(self):
self.root = Tk()
self.sub_1, self.sub_2, self.sub_1_1, self.sub_1_2, self.sub_2_1, self.sub_2_2, self.sub_2_2_1, self.sub_2_2_2,\
self.sub_2_2_2_1, self.sub_2_2_2_2 = buildFrame(self.root)
self.treeview, self.entry = buildTree(self.sub_1_1, self.sub_1_2)
self.selected = buildSelectedBox(self.sub_2_2_1)
self.choosefilebutton = Button(master=self.sub_1_2, text='Choose Directory',
command=self.choosefile)
self.choosefilebutton.pack(side=RIGHT,anchor = 'e', fill='x')
self.selectbutton = Button(master=self.sub_2_1, text='>>',command=self.selectitems)
self.selectbutton.pack(fill=X)
self.unselectbutton = Button(master=self.sub_2_1, text='<<', command=self.unselectitems)
self.unselectbutton.pack(fill=X)
self.plot = Button(master=self.sub_2_1, text='Plot', command=self.childWindow)
self.plot.pack(fill=X)
self.convert = Button(master=self.sub_2_1, text='Convert',command=self.SQ)
self.convert.pack()
self.LaB6, self.CSV = buildDirectoryDisplay(self.sub_2_2_2_1, self.sub_2_2_2_2)
self.LaB6location = None
self.CSVlocation = None
self.LaB6button = Button(master=self.sub_2_2_2_1, text='Set LaB6 File', command=self.setLaB6button)
self.LaB6button.pack(side=RIGHT,anchor = 'e', fill='x')
self.CSVbutton = Button(master=self.sub_2_2_2_2, text='Set CSV folder', command=self.setCSVbutton)
self.CSVbutton.pack(side=RIGHT,anchor = 'e', fill='x')
self.root.mainloop()
def choosefile(self):
filepath = filedialog.askdirectory()
if filepath:
self.treeview.delete(*self.treeview.get_children())
self.entry.insert(0,filepath)
dfpath = os.path.abspath(filepath)
node = self.treeview.insert('', 'end', text=dfpath,
values=[dfpath, "directory"], open=True)
fill_tree(self.treeview, node)
def selectitems(self):
temp = self.treeview.selection()
for key in temp:
if self.treeview.item(key)['values']:
self.selected.insert(END, self.treeview.item(key)['values'][0])
def unselectitems(self):
temp = self.selected.curselection()
temp2 = []
for key in temp:
temp2.insert(0, key)
for key in temp2:
self.selected.delete(key)
def childWindow(self):
win2 = Toplevel()
temp = self.selected.curselection()
temp2 = []
for key in temp:
temp2.append(self.selected.get(int(key)))
allcsv = convertCSV(temp2, 10)
f = Figure()
a = f.add_subplot(111)
for column in allcsv.columns:
a.semilogy(allcsv.index, allcsv[column])
a.set_xlabel('Q Value')
a.set_ylabel('Intensity')
canvas = FigureCanvasTkAgg(f, master=win2)
canvas.show()
canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
toolbar = NavigationToolbar2TkAgg(canvas, win2)
def SQ(self):
temp = self.selected.curselection()
temp2 = []
for key in temp:
temp2.append(self.selected.get(int(key)))
# SQ_Pilatus(temp2, self.LaB6location, self.CSVlocation)
def setLaB6button(self):
filepath = filedialog.askopenfilename()
if filepath:
self.LaB6.insert(0,filepath)
self.LaB6location = filepath
return
def setCSVbutton(self):
filepath = filedialog.askdirectory()
if filepath:
self.CSV.insert(0,filepath)
self.CSVlocation = filepath
return | apache-2.0 |
ryfeus/lambda-packs | Tensorflow_LightGBM_Scipy_nightly/source/scipy/integrate/quadrature.py | 20 | 28269 | from __future__ import division, print_function, absolute_import
import numpy as np
import math
import warnings
# trapz is a public function for scipy.integrate,
# even though it's actually a numpy function.
from numpy import trapz
from scipy.special import roots_legendre
from scipy.special import gammaln
from scipy._lib.six import xrange
__all__ = ['fixed_quad', 'quadrature', 'romberg', 'trapz', 'simps', 'romb',
'cumtrapz', 'newton_cotes']
class AccuracyWarning(Warning):
pass
def _cached_roots_legendre(n):
"""
Cache roots_legendre results to speed up calls of the fixed_quad
function.
"""
if n in _cached_roots_legendre.cache:
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache[n] = roots_legendre(n)
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache = dict()
def fixed_quad(func, a, b, args=(), n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
If integrating a vector-valued function, the returned array must have
shape ``(..., len(x))``.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
none : None
Statically returned value of None
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
"""
x, w = _cached_roots_legendre(n)
x = np.real(x)
if np.isinf(a) or np.isinf(b):
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1), None
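# --- Hedged usage sketch (added for illustration; not part of the original
# scipy source). fixed_quad integrates a vectorized callable between finite
# limits at a fixed Gaussian order; the integrand and limits below are
# arbitrary choices for this example only.
if __name__ == "__main__":
    _val, _ = fixed_quad(np.sin, 0.0, np.pi, n=5)
    print("fixed_quad of sin on [0, pi] (exact value 2):", _val)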
def vectorize1(func, args=(), vec_func=False):
"""Vectorize the call to a function.
This is an internal utility function used by `romberg` and
`quadrature` to create a vectorized version of a function.
If `vec_func` is True, the function `func` is assumed to take vector
arguments.
Parameters
----------
func : callable
User defined function.
args : tuple, optional
Extra arguments for the function.
vec_func : bool, optional
True if the function func takes vector arguments.
Returns
-------
vfunc : callable
A function that will take a vector argument and return the
result.
"""
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if np.isscalar(x):
return func(x, *args)
x = np.asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
dtype = getattr(y0, 'dtype', type(y0))
output = np.empty((n,), dtype=dtype)
output[0] = y0
for i in xrange(1, n):
output[i] = func(x[i], *args)
return output
return vfunc
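# --- Hedged usage sketch (added for illustration; not part of the original
# scipy source). vectorize1 wraps a scalar-only callable so it can be
# evaluated on an array of sample points, as quadrature/romberg require.
if __name__ == "__main__":
    _vcos = vectorize1(math.cos, vec_func=False)
    print("vectorized cos at 0, pi/2, pi:", _vcos(np.array([0.0, np.pi / 2, np.pi])))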
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
"""
Compute a definite integral using fixed-tolerance Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature
with absolute tolerance `tol`.
Parameters
----------
func : function
A Python function or method to integrate.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function.
tol, rtol : float, optional
Iteration stops when error between last two iterates is less than
`tol` OR the relative change is less than `rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
vec_func : bool, optional
        Whether `func` handles arrays as arguments (i.e. is
        a "vector" function). Default is True.
miniter : int, optional
Minimum order of Gaussian quadrature.
Returns
-------
val : float
Gaussian quadrature approximation (within tolerance) to integral.
err : float
Difference between last two estimates of the integral.
See also
--------
romberg: adaptive Romberg quadrature
fixed_quad: fixed-order Gaussian quadrature
quad: adaptive quadrature using QUADPACK
dblquad: double integrals
tplquad: triple integrals
romb: integrator for sampled data
simps: integrator for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrator
odeint: ODE integrator
"""
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in xrange(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err
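# --- Hedged usage sketch (added for illustration; not part of the original
# scipy source). quadrature() raises the Gaussian order until the change
# between successive estimates drops below tol/rtol, and returns that change
# alongside the value.
if __name__ == "__main__":
    _val, _err = quadrature(np.exp, 0.0, 1.0, tol=1e-10, rtol=1e-10)
    print("integral of exp on [0, 1] (exact e - 1):", _val, "last difference:", _err)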
def tupleset(t, i, value):
l = list(t)
l[i] = value
return tuple(l)
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
"""
Cumulatively integrate y(x) using the composite trapezoidal rule.
Parameters
----------
y : array_like
Values to integrate.
x : array_like, optional
The coordinate to integrate along. If None (default), use spacing `dx`
between consecutive elements in `y`.
dx : float, optional
Spacing between elements of `y`. Only used if `x` is None.
axis : int, optional
Specifies the axis to cumulate. Default is -1 (last axis).
initial : scalar, optional
If given, uses this value as the first value in the returned result.
Typically this value should be 0. Default is None, which means no
value at ``x[0]`` is returned and `res` has one element less than `y`
along the axis of integration.
Returns
-------
res : ndarray
The result of cumulative integration of `y` along `axis`.
If `initial` is None, the shape is such that the axis of integration
has one less value than `y`. If `initial` is given, the shape is equal
to that of `y`.
See Also
--------
numpy.cumsum, numpy.cumprod
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
Examples
--------
>>> from scipy import integrate
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2, 2, num=20)
>>> y = x
>>> y_int = integrate.cumtrapz(y, x, initial=0)
>>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
>>> plt.show()
"""
y = np.asarray(y)
if x is None:
d = dx
else:
x = np.asarray(x)
if x.ndim == 1:
d = np.diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = -1
d = d.reshape(shape)
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
else:
d = np.diff(x, axis=axis)
if d.shape[axis] != y.shape[axis] - 1:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
nd = len(y.shape)
slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)
if initial is not None:
if not np.isscalar(initial):
raise ValueError("`initial` parameter should be a scalar.")
shape = list(res.shape)
shape[axis] = 1
res = np.concatenate([np.ones(shape, dtype=res.dtype) * initial, res],
axis=axis)
return res
def _basic_simps(y, start, stop, x, dx, axis):
nd = len(y.shape)
if start is None:
start = 0
step = 2
slice_all = (slice(None),)*nd
slice0 = tupleset(slice_all, axis, slice(start, stop, step))
slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))
    if x is None:  # Evenly spaced Simpson's rule.
result = np.sum(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
axis=axis)
else:
# Account for possibly different spacings.
# Simpson's rule changes a bit.
h = np.diff(x, axis=axis)
sl0 = tupleset(slice_all, axis, slice(start, stop, step))
sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
h0 = h[sl0]
h1 = h[sl1]
hsum = h0 + h1
hprod = h0 * h1
h0divh1 = h0 / h1
tmp = hsum/6.0 * (y[slice0]*(2-1.0/h0divh1) +
y[slice1]*hsum*hsum/hprod +
y[slice2]*(2-h0divh1))
result = np.sum(tmp, axis=axis)
return result
def simps(y, x=None, dx=1, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
dx : int, optional
Spacing of integration points along axis of `y`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
even : str {'avg', 'first', 'last'}, optional
        'avg' : Average two results: 1) use the first N-2 intervals with
a trapezoidal rule on the last interval and 2) use the last
N-2 intervals with a trapezoidal rule on the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrators
odeint: ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
"""
y = np.asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = np.asarray(x)
if len(x.shape) == 1:
shapex = [1] * nd
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be "
"'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simps(y, 0, N-3, x, dx, axis)
# Compute using Simpson's rule on last set of intervals
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simps(y, 1, N-2, x, dx, axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simps(y, 0, N-2, x, dx, axis)
if returnshape:
x = x.reshape(saveshape)
return result
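# --- Hedged usage sketch (added for illustration; not part of the original
# scipy source). simps() integrates sampled data; with an odd number of
# equally spaced samples it is exact for cubic polynomials.
if __name__ == "__main__":
    _xs = np.linspace(0.0, 1.0, 9)      # 9 samples -> an even number of intervals
    print("simps of x**3 on [0, 1] (exact 0.25):", simps(_xs ** 3, _xs))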
def romb(y, dx=1.0, axis=-1, show=False):
"""
Romberg integration using samples of a function.
Parameters
----------
y : array_like
A vector of ``2**k + 1`` equally-spaced samples of a function.
dx : float, optional
The sample spacing. Default is 1.
axis : int, optional
The axis along which to integrate. Default is -1 (last axis).
show : bool, optional
When `y` is a single 1-D array, then if this argument is True
print the table showing Richardson extrapolation from the
samples. Default is False.
Returns
-------
romb : ndarray
The integrated result for `axis`.
See also
--------
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrators
odeint : ODE integrators
"""
y = np.asarray(y)
nd = len(y.shape)
Nsamps = y.shape[axis]
Ninterv = Nsamps-1
n = 1
k = 0
while n < Ninterv:
n <<= 1
k += 1
if n != Ninterv:
raise ValueError("Number of samples must be one plus a "
"non-negative power of 2.")
R = {}
slice_all = (slice(None),) * nd
slice0 = tupleset(slice_all, axis, 0)
slicem1 = tupleset(slice_all, axis, -1)
h = Ninterv * np.asarray(dx, dtype=float)
R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
slice_R = slice_all
start = stop = step = Ninterv
for i in xrange(1, k+1):
start >>= 1
slice_R = tupleset(slice_R, axis, slice(start, stop, step))
step >>= 1
R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
for j in xrange(1, i+1):
prev = R[(i, j-1)]
R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
h /= 2.0
if show:
if not np.isscalar(R[(0, 0)]):
print("*** Printing table only supported for integrals" +
" of a single data set.")
else:
try:
precis = show[0]
except (TypeError, IndexError):
precis = 5
try:
width = show[1]
except (TypeError, IndexError):
width = 8
formstr = "%%%d.%df" % (width, precis)
title = "Richardson Extrapolation Table for Romberg Integration"
print("", title.center(68), "=" * 68, sep="\n", end="")
for i in xrange(k+1):
for j in xrange(i+1):
print(formstr % R[(i, j)], end=" ")
print()
print("=" * 68)
print()
return R[(k, k)]
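# --- Hedged usage sketch (added for illustration; not part of the original
# scipy source). romb() expects 2**k + 1 equally spaced samples and the
# spacing dx between them.
if __name__ == "__main__":
    _xs = np.linspace(0.0, 1.0, 2 ** 4 + 1)   # 17 = 2**4 + 1 samples
    print("romb of exp(-x) on [0, 1] (exact 1 - 1/e):",
          romb(np.exp(-_xs), dx=_xs[1] - _xs[0]))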
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <ransom@cfa.harvard.edu>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-21
#
# Adapted to scipy by Travis Oliphant <oliphant.travis@ieee.org>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
numtosum = numtraps/2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s
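# --- Hedged note (added for illustration): because _difftrap returns only the
# newly introduced midpoint ordinates, the trapezoid estimate can be refined
# incrementally,
#     T(2n) = T(n)/2 + ((b - a)/(2n)) * (sum of the n new ordinates),
# which is how romberg() below reuses `ordsum` instead of re-evaluating func.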
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
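# --- Hedged note (added for illustration): this is Richardson extrapolation,
#     R(i, j) = (4**j * R(i, j-1) - R(i-1, j-1)) / (4**j - 1),
# where the finer estimate `c` gets weight 4**j/(4**j - 1); each column j
# cancels the leading O(h**(2j)) error term of the previous one.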
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print('Romberg integration of', repr(function), end=' ')
print('from', interval)
print('')
print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
for i in xrange(len(resmat)):
print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
for j in xrange(i+1):
print('%9f' % (resmat[i][j]), end=' ')
print('')
print('')
print('The final result is', resmat[i][j], end=' ')
print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
divmax=10, vec_func=False):
"""
Romberg integration of a callable function or method.
Returns the integral of `function` (a function of one variable)
over the interval (`a`, `b`).
If `show` is 1, the triangular array of the intermediate results
will be printed. If `vec_func` is True (default is False), then
`function` is assumed to support vector arguments.
Parameters
----------
function : callable
Function to be integrated.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
results : float
Result of the integration.
Other Parameters
----------------
args : tuple, optional
Extra arguments to pass to function. Each element of `args` will
be passed as a single argument to `func`. Default is to pass no
extra arguments.
tol, rtol : float, optional
The desired absolute and relative tolerances. Defaults are 1.48e-8.
show : bool, optional
Whether to print the results. Default is False.
divmax : int, optional
Maximum order of extrapolation. Default is 10.
vec_func : bool, optional
        Whether `func` handles arrays as arguments (i.e. whether it is a
"vector" function). Default is False.
See Also
--------
fixed_quad : Fixed-order Gaussian quadrature.
quad : Adaptive quadrature using QUADPACK.
dblquad : Double integrals.
tplquad : Triple integrals.
romb : Integrators for sampled data.
simps : Integrators for sampled data.
cumtrapz : Cumulative integration for sampled data.
ode : ODE integrator.
odeint : ODE integrator.
References
----------
.. [1] 'Romberg's method' http://en.wikipedia.org/wiki/Romberg%27s_method
Examples
--------
Integrate a gaussian from 0 to 1 and compare to the error function.
>>> from scipy import integrate
>>> from scipy.special import erf
>>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
>>> result = integrate.romberg(gaussian, 0, 1, show=True)
Romberg integration of <function vfunc at ...> from [0, 1]
::
Steps StepSize Results
1 1.000000 0.385872
2 0.500000 0.412631 0.421551
4 0.250000 0.419184 0.421368 0.421356
8 0.125000 0.420810 0.421352 0.421350 0.421350
16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350
32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350
The final result is 0.421350396475 after 33 function evaluations.
>>> print("%g %g" % (2*result, erf(1)))
0.842701 0.842701
"""
if np.isinf(a) or np.isinf(b):
raise ValueError("Romberg integration only available "
"for finite limits.")
vfunc = vectorize1(function, args, vec_func=vec_func)
n = 1
interval = [a, b]
intrange = b - a
ordsum = _difftrap(vfunc, interval, n)
result = intrange * ordsum
resmat = [[result]]
err = np.inf
last_row = resmat[0]
for i in xrange(1, divmax+1):
n *= 2
ordsum += _difftrap(vfunc, interval, n)
row = [intrange * ordsum / n]
for k in xrange(i):
row.append(_romberg_diff(last_row[k], row[k], k+1))
result = row[i]
lastresult = last_row[i-1]
if show:
resmat.append(row)
err = abs(result - lastresult)
if err < tol or err < rtol * abs(result):
break
last_row = row
else:
warnings.warn(
"divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
AccuracyWarning)
if show:
_printresmat(vfunc, interval, resmat)
return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# error in these coefficients grows as N gets larger.
# or as samples are closer and closer together
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
_builtincoeffs = {
1: (1,2,[1,1],-1,12),
2: (1,3,[1,4,1],-1,90),
3: (3,8,[1,3,3,1],-3,80),
4: (2,45,[7,32,12,32,7],-8,945),
5: (5,288,[19,75,50,50,75,19],-275,12096),
6: (1,140,[41,216,27,272,27,216,41],-9,1400),
7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
-2368,467775),
9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
15741,2857], -4671, 394240),
10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
-260550,272400,-48525,106300,16067],
-673175, 163459296),
11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
15493566,15493566,-9595542,25226685,-3237113,
13486539,2171465], -2224234463, 237758976000),
12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
87516288,-87797136,87516288,-51491295,35725120,
-7587864,9903168,1364651], -3012, 875875),
13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
156074417954,-151659573325,206683437987,
-43111992612,-43111992612,206683437987,
-151659573325,156074417954,-31268252574,
56280729661,8181904909], -2639651053,
344881152000),
14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
-6625093363,12630121616,-16802270373,19534438464,
-16802270373,12630121616,-6625093363,3501442784,
-770720657,710986864,90241897], -3740727473,
1275983280000)
}
def newton_cotes(rn, equal=0):
"""
Return weights and error coefficient for Newton-Cotes integration.
Suppose we have (N+1) samples of f at the positions
x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
integral between x_0 and x_N is:
:math:`\\int_{x_0}^{x_N} f(x)dx = \\Delta x \\sum_{i=0}^{N} a_i f(x_i)
+ B_N (\\Delta x)^{N+2} f^{N+1} (\\xi)`
where :math:`\\xi \\in [x_0,x_N]`
    and :math:`\\Delta x = \\frac{x_N-x_0}{N}` is the average sample spacing.
If the samples are equally-spaced and N is even, then the error
term is :math:`B_N (\\Delta x)^{N+3} f^{N+2}(\\xi)`.
Parameters
----------
rn : int
The integer order for equally-spaced data or the relative positions of
the samples with the first sample at 0 and the last at N, where N+1 is
the length of `rn`. N is the order of the Newton-Cotes integration.
equal : int, optional
Set to 1 to enforce equally spaced data.
Returns
-------
an : ndarray
1-D array of weights to apply to the function at the provided sample
positions.
B : float
Error coefficient.
Notes
-----
Normally, the Newton-Cotes rules are used on smaller integration
regions and a composite rule is used to return the total integral.
"""
try:
N = len(rn)-1
if equal:
rn = np.arange(N+1)
elif np.all(np.diff(rn) == 1):
equal = 1
except:
N = rn
rn = np.arange(N+1)
equal = 1
if equal and N in _builtincoeffs:
na, da, vi, nb, db = _builtincoeffs[N]
an = na * np.array(vi, dtype=float) / da
return an, float(nb)/db
if (rn[0] != 0) or (rn[-1] != N):
raise ValueError("The sample positions must start at 0"
" and end at N")
yi = rn / float(N)
ti = 2 * yi - 1
nvec = np.arange(N+1)
C = ti ** nvec[:, np.newaxis]
Cinv = np.linalg.inv(C)
# improve precision of result
for i in range(2):
Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
vec = 2.0 / (nvec[::2]+1)
ai = Cinv[:, ::2].dot(vec) * (N / 2.)
if (N % 2 == 0) and equal:
BN = N/(N+3.)
power = N+2
else:
BN = N/(N+2.)
power = N+1
BN = BN - np.dot(yi**power, ai)
p1 = power+1
fac = power*math.log(N) - gammaln(p1)
fac = math.exp(fac)
return ai, BN*fac
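# --- Hedged usage sketch (added for illustration; not part of the original
# scipy source). For order N the returned weights are applied as
#     integral ~= dx * sum(an * f(x_i)),
# with dx the sample spacing, as in the coefficient table above.
if __name__ == "__main__":
    _an, _B = newton_cotes(3)                 # Simpson's 3/8-rule weights
    _xs = np.linspace(0.0, np.pi, 4)
    _dx = _xs[1] - _xs[0]
    print("Newton-Cotes N=3 weights:", _an)
    print("estimate of integral of sin on [0, pi]:", _dx * np.sum(_an * np.sin(_xs)))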
| mit |
mne-tools/mne-tools.github.io | 0.14/_downloads/plot_find_eog_artifacts.py | 24 | 1228 | """
==================
Find EOG artifacts
==================
Locate peaks of EOG to spot blinks and general EOG artifacts.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
event_id = 998
eog_events = mne.preprocessing.find_eog_events(raw, event_id)
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=True,
exclude='bads')
tmin, tmax = -0.2, 0.2
epochs = mne.Epochs(raw, eog_events, event_id, tmin, tmax, picks=picks)
data = epochs.get_data()
print("Number of detected EOG artifacts : %d" % len(data))
###############################################################################
# Plot EOG artifacts
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('EOG (muV)')
plt.show()
| bsd-3-clause |
kapteyn-astro/kapteyn | doc/source/EXAMPLES/kmpfit_voigt.py | 1 | 4841 | #!/usr/bin/env python
#------------------------------------------------------------
# Script which demonstrates how to find the best-fit
# parameters of a Voigt line-shape model
#
# Vog, 26 Mar 2012
#------------------------------------------------------------
import numpy
from matplotlib.pyplot import figure, show, rc
from scipy.special import wofz
from kapteyn import kmpfit
ln2 = numpy.log(2)
def voigt(x, y):
# The Voigt function is also the real part of
# w(z) = exp(-z^2) erfc(iz), the complex probability function,
# which is also known as the Faddeeva function. Scipy has
# implemented this function under the name wofz()
z = x + 1j*y
I = wofz(z).real
return I
def Voigt(nu, alphaD, alphaL, nu_0, A, a=0, b=0):
# The Voigt line shape in terms of its physical parameters
f = numpy.sqrt(ln2)
x = (nu-nu_0)/alphaD * f
y = alphaL/alphaD * f
backg = a + b*nu
V = A*f/(alphaD*numpy.sqrt(numpy.pi)) * voigt(x, y) + backg
return V
def funcV(p, x):
# Compose the Voigt line-shape
alphaD, alphaL, nu_0, I, a, b = p
return Voigt(x, alphaD, alphaL, nu_0, I, a, b)
def funcG(p, x):
# Model function is a gaussian
A, mu, sigma, zerolev = p
return( A * numpy.exp(-(x-mu)*(x-mu)/(2*sigma*sigma)) + zerolev )
def residualsV(p, data):
# Return weighted residuals of Voigt
x, y, err = data
return (y-funcV(p,x)) / err
def residualsG(p, data):
# Return weighted residuals of Gauss
x, y, err = data
return (y-funcG(p,x)) / err
# Data from simulated MUSE cube
x = numpy.array([854.05,854.18,854.31,854.44,854.57,854.7,854.83,854.96,\
855.09,855.22,855.35,855.48,855.61,855.74,855.87,856.0,\
856.13,856.26,856.39,856.52,856.65,856.78,856.91])
y = numpy.array([6.31683382764,6.41273839772,6.43047296256,6.37437933311,\
6.34883451462,6.30711287633,6.24409954622,6.09241716936,\
5.75421549752,5.20381929725,4.18020502292,3.64663145132,\
4.25251198746,5.23945118487,5.76701752096,6.06587703526,\
6.15751018003,6.25985588506,6.35063433647,6.41795488447,\
6.42002335563,6.35883554071,6.36915982142])
N = len(y)
err = numpy.ones(N)
A = -2
alphaD = 0.5
alphaL = 0.5
a = 6
b = 0
nu_0 = 855
p0 = [alphaD, alphaL, nu_0, A, a, b]
# Do the fit
fitter = kmpfit.Fitter(residuals=residualsV, data=(x,y,err))
fitter.parinfo = [{}, {}, {}, {}, {}, {'fixed':True}] # Take zero level fixed in fit
fitter.fit(params0=p0)
print("\n========= Fit results Voigt profile ==========")
print("Initial params:", fitter.params0)
print("Params: ", fitter.params)
print("Iterations: ", fitter.niter)
print("Function ev: ", fitter.nfev)
print("Uncertainties: ", fitter.xerror)
print("dof: ", fitter.dof)
print("chi^2, rchi2: ", fitter.chi2_min, fitter.rchi2_min)
print("stderr: ", fitter.stderr)
print("Status: ", fitter.status)
alphaD, alphaL, nu_0, I, a_back, b_back = fitter.params
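# Note (added comment): the c1/c2 constants below appear to implement the
# Olivero & Longbothum approximation for the Voigt FWHM, written here in
# terms of the half-widths alphaL and alphaD.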
c1 = 1.0692
c2 = 0.86639
hwhm = 0.5*(c1*alphaL+numpy.sqrt(c2*alphaL**2+4*alphaD**2))
print("\nFWHM Voigt profile: ", 2*hwhm)
f = numpy.sqrt(ln2)
Y = alphaL/alphaD * f
amp = I/alphaD*numpy.sqrt(ln2/numpy.pi)*voigt(0,Y)
print("Amplitude Voigt profile:", amp)
print("Area under profile: ", I)
# Fit the Gaussian model
p0 = [-3, 855, 0.5, 6.3]
fitterG = kmpfit.Fitter(residuals=residualsG, data=(x,y,err))
#fitterG.parinfo = [{}, {}, {}, {}, {}] # Take zero level fixed in fit
fitterG.fit(params0=p0)
print("\n========= Fit results Gaussian profile ==========")
print("Initial params:", fitterG.params0)
print("Params: ", fitterG.params)
print("Iterations: ", fitterG.niter)
print("Function ev: ", fitterG.nfev)
print("Uncertainties: ", fitterG.xerror)
print("dof: ", fitterG.dof)
print("chi^2, rchi2: ", fitterG.chi2_min, fitterG.rchi2_min)
print("stderr: ", fitterG.stderr)
print("Status: ", fitterG.status)
fwhmG = 2*numpy.sqrt(2*numpy.log(2))*fitterG.params[2]
print("FWHM Gaussian: ", fwhmG)
# Plot the result
rc('legend', fontsize=6)
fig = figure()
frame1 = fig.add_subplot(1,1,1)
xd = numpy.linspace(x.min(), x.max(), 200)
frame1.plot(x, y, 'bo', label="data")
label = "Model with Voigt function"
frame1.plot(xd, funcV(fitter.params,xd), 'g', label=label)
label = "Model with Gaussian function"
frame1.plot(xd, funcG(fitterG.params,xd), 'm', ls='--', label=label)
offset = a_back+b_back*nu_0
frame1.plot((nu_0-hwhm,nu_0+hwhm), (offset+amp/2,offset+amp/2), 'r', label='fwhm')
frame1.plot(xd, a_back+b_back*xd, "y", label='Background')
frame1.set_xlabel("$\\nu$")
frame1.set_ylabel("$\\phi(\\nu)$")
vals = (fitter.chi2_min, fitter.rchi2_min, fitter.dof)
title = "Profile data with Voigt- vs. Gaussian model"
frame1.set_title(title, y=1.05)
frame1.grid(True)
leg = frame1.legend(loc=3)
show() | bsd-3-clause |
hfutsuchao/Python2.6 | stocks/strategy_stock_tech_corr_bak_nonapart.py | 1 | 14626 | #coding:utf-8
from sqlalchemy import create_engine
import pandas as pd
import numpy as np
from sqlalchemy.orm import sessionmaker
import talib
import matplotlib.pyplot as plt
from sklearn import preprocessing
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
import time
from commfunction import date_add
DB_CONNECT_STRING = 'sqlite:///stock_US_data.db'
engine = create_engine(DB_CONNECT_STRING,echo=False)
DB_Session = sessionmaker(bind=engine)
session = DB_Session()
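# Note (added comment): get_quota() below builds day-over-day differences of the
# close price and several talib indicators (MA, KDJ, SAR, CCI, MACD); the second
# frame it returns shifts the close difference back one row, so each row pairs
# today's indicator changes with the *next* day's close change for correlation.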
def get_quota(df):
close = df['close']
ma5 = talib.MA(df['close'].values,5)
ma10 = talib.MA(df['close'].values,10)
ma20 = talib.MA(df['close'].values,20)
ma30 = talib.MA(df['close'].values,30)
K, D = talib.STOCH(df['high'].values,df['low'].values,df['close'].values, fastk_period=9, slowk_period=3)
J = K * 3 - D * 2
sar = talib.SAR(df['high'].values, df['low'].values, acceleration=0.05, maximum=0.2)
sar = pd.DataFrame(sar-close)
sar.index = df.date
atr = talib.ATR(df['high'].values,df['low'].values,df['close'].values)
natr = talib.NATR(df['high'].values,df['low'].values,df['close'].values)
trange = talib.TRANGE(df['high'].values,df['low'].values,df['close'].values)
cci = talib.CCI(df['high'].values,df['low'].values,df['close'].values,14)
dif, dea, bar = talib.MACDFIX(df['close'].values)
bar = bar * 2
df_all = df.drop(['code','open','low', 'high','volume'],axis=1).set_index('date')
df_all.insert(0,'ma5',ma5)
df_all.insert(0,'ma10',ma10)
df_all.insert(0,'ma20',ma20)
df_all.insert(0,'ma30',ma30)
df_all.insert(0,'K',K)
df_all.insert(0,'D',D)
df_all.insert(0,'J',J)
df_all.insert(0,'cci',cci)
df_all.insert(0,'bar',bar)
df_all.insert(0,'dif',dif)
df_all.insert(0,'dea',dea)
df_all.insert(0,'sar',sar)
df_yesterday = df_all.T
index_c = df_all.index
added = [np.nan] * len(df_all.columns)
df_yesterday.insert(0, len(df_yesterday.columns), added)
df_yesterday = df_yesterday.T
df_yesterday = df_yesterday.drop(df_all.index[len(df_all.index)-1])
df_yesterday.insert(0, 'index_c', index_c)
df_yesterday = df_yesterday.set_index('index_c')
df_dif = df_all - df_yesterday
df_dif_close_plus_one_day = df_dif.copy()
for i in range(len(df_dif_close_plus_one_day['close'])-1):
df_dif_close_plus_one_day['close'][i] = df_dif_close_plus_one_day['close'][i+1]
df_dif_close_plus_one_day['close'][len(df_dif_close_plus_one_day['close'])-1] = np.nan
df_dif = df_dif.dropna(axis=0,how='any')
df_dif_close_plus_one_day = df_dif_close_plus_one_day.dropna(axis=0,how='any')
return df_dif, df_dif_close_plus_one_day
def parse(df,start_date='0',date_delta=60,lost=1.0):
buy_price = {}
buy_date = {}
sell_price = {}
sell_date = {}
is_buy = {}
is_sell = {}
rate = {}
start_date_open = 0
end_date_open = 0
rate['based'] = {}
rate['based']['profit'] = {}
if start_date:
end_date = date_add(start_date,date_delta)
else:
end_date = '9'
'''df = pd.read_sql('select * from day_k_data where code="'+code+'" order by date asc;',engine)
buy_price = {}
buy_date = {}
sell_price = {}
sell_date = {}
is_buy = {}
is_sell = {}
rate = {}
rate['based'] = {}
rate['based']['profit'] = {}
close = df['close']
ma5 = talib.MA(df['close'].values,5)
ma10 = talib.MA(df['close'].values,10)
ma20 = talib.MA(df['close'].values,20)
ma30 = talib.MA(df['close'].values,30)
K, D = talib.STOCH(df['high'].values,df['low'].values,df['close'].values, fastk_period=9, slowk_period=3)
J = K * 3 - D * 2
sar = talib.SAR(df['high'].values, df['low'].values, acceleration=0.05, maximum=0.2)
sar = pd.DataFrame(sar-close)
sar.index = df.date
atr = talib.ATR(df['high'].values,df['low'].values,df['close'].values)
natr = talib.NATR(df['high'].values,df['low'].values,df['close'].values)
trange = talib.TRANGE(df['high'].values,df['low'].values,df['close'].values)
cci = talib.CCI(df['high'].values,df['low'].values,df['close'].values,14)
dif, dea, bar = talib.MACDFIX(df['close'].values)
bar = bar * 2'''
'''df_all = df.drop(['code','open','low', 'high','volume'],axis=1).set_index('date')
df_all.insert(0,'ma5',ma5)
df_all.insert(0,'ma10',ma10)
df_all.insert(0,'ma20',ma20)
df_all.insert(0,'ma30',ma30)
df_all.insert(0,'K',K)
df_all.insert(0,'D',D)
df_all.insert(0,'J',J)
df_all.insert(0,'cci',cci)
df_all.insert(0,'bar',bar)
df_all.insert(0,'dif',dif)
df_all.insert(0,'dea',dea)
df_all.insert(0,'sar',sar)
df_yesterday = df_all.T
index_c = df_all.index
added = [np.nan] * len(df_all.columns)
df_yesterday.insert(0, len(df_yesterday.columns), added)
df_yesterday = df_yesterday.T
df_yesterday = df_yesterday.drop(df_all.index[len(df_all.index)-1])
df_yesterday.insert(0, 'index_c', index_c)
df_yesterday = df_yesterday.set_index('index_c')
df_dif = df_all - df_yesterday
df_dif_close_plus_one_day = df_dif.copy()
for i in range(len(df_dif_close_plus_one_day['close'])-1):
df_dif_close_plus_one_day['close'][i] = df_dif_close_plus_one_day['close'][i+1]
df_dif_close_plus_one_day['close'][len(df_dif_close_plus_one_day['close'])-1] = np.nan
df_dif = df_dif.dropna(axis=0,how='any')
df_dif_close_plus_one_day = df_dif_close_plus_one_day.dropna(axis=0,how='any')
'''
df_dif, df_dif_close_plus_one_day = get_quota(df)
df_dif_corr = df_dif.corr().ix['close']
df_dif_close_plus_one_day_corr = df_dif_close_plus_one_day.corr().ix['close']
df_dif_norm_1 = df_dif_norm = df_dif.copy()
df_dif_close_plus_one_day_norm_1 = df_dif_close_plus_one_day_norm = df_dif_close_plus_one_day.copy()
for column in df_dif.columns:
df_dif_norm[column] = df_dif[column] / abs(df_dif[column]).max()
for column in df_dif_close_plus_one_day.columns:
df_dif_close_plus_one_day_norm[column] = df_dif_close_plus_one_day[column] / abs(df_dif_close_plus_one_day[column]).max()
for column in df_dif_norm_1.columns:
df_dif_norm_1[column].ix[df_dif_norm[column] <= 0] = -1
df_dif_norm_1[column].ix[df_dif_norm[column] > 0] = 1
for column in df_dif_close_plus_one_day_norm_1.columns:
df_dif_close_plus_one_day_norm_1[column].ix[df_dif_close_plus_one_day_norm[column] <= 0] = -1
df_dif_close_plus_one_day_norm_1[column].ix[df_dif_close_plus_one_day_norm[column] > 0] = 1
df_dif_norm_corr = df_dif_norm.corr().ix['close']
df_dif_close_plus_one_day_norm_corr = df_dif_close_plus_one_day_norm.corr().ix['close']
#print df_dif_norm_corr,df_dif_close_plus_one_day_norm_corr
df_corr = df_dif_close_plus_one_day_norm_corr
df = pd.concat([df.set_index('date')['close'],df_dif_norm['sar']],axis=1).dropna(how='any')
close = df['close']
df_dif_norm = df_dif_norm_1
for idx in range(len(df_dif_norm)):
date_this = df_dif_norm.index[idx]
if date_this < start_date:
continue
if date_this > end_date:
end_date_open = close_val
break
close_val = close[idx]
sign = 0
for key_name in df_dif_norm.drop('close',axis=1).columns:
sign = sign + df_dif_norm.ix[date_this,key_name] * df_corr[key_name]
if start_date_open == 0:
start_date_open = close_val
if idx>=1:
lastdate = df_dif_norm.index[idx-1]
if lastdate not in rate['based']['profit']:
rate['based']['profit'][lastdate] = 1.0
rate['based']['profit'][date_this] = rate['based']['profit'][lastdate] * close[idx] / close[idx-1]
for m in np.array(range(-100,100,5))/50.0:
for n in np.array(range(-100,int(50*m+1),5))/50.0:
s_type = start_date + '_' + end_date + '_' + 'corr' + str(m) + '_' + str(n)
if s_type not in buy_price:
buy_price[s_type] = []
buy_date[s_type] = []
sell_price[s_type] = []
sell_date[s_type] = []
is_buy[s_type] = 0
#is_sell[s_type] = 0
if sign>=m:
if is_buy[s_type] == 0:
is_buy[s_type] = 1
buy_price[s_type].append(close_val)
buy_date[s_type].append(date_this)
#is_sell[s_type] = 0
continue
if sign<n or (len(buy_price[s_type]) and close_val * (1-0.002) / buy_price[s_type][-1] <= (1-lost)):
if is_buy[s_type] == 1 : #and is_sell[s_type] == 0
is_buy[s_type] = 0
sell_price[s_type].append(close_val)
sell_date[s_type].append(date_this)
#is_sell[s_type] = 1
if not end_date_open:
end_date_open = close_val
rate['based']['profit']['total'] = end_date_open * (1 - 0.002) / start_date_open
for s_type in sell_price:
rate[s_type] = {}
rate[s_type]['profit'] = {}
rate[s_type]['profit']['total'] = 1.0
rate[s_type]['trade'] = {}
for i in range(len(buy_price[s_type])):
try:
#rate[s_type]['profit']['total'] = rate[s_type]['profit']['total'] * (sell_price[s_type][i] * (1 - 0.002) / buy_price[s_type][i])
rate[s_type]['profit']['total'] = rate[s_type]['profit']['total'] * (sell_price[s_type][i] * (1 - 0.002) / buy_price[s_type][i]) * ((sell_price[s_type][i]) * (1 - 0.002) / buy_price[s_type][i+1])
rate[s_type]['profit'][buy_date[s_type][i]] = rate[s_type]['profit']['total']
rate[s_type]['trade'][buy_date[s_type][i]] = [buy_date[s_type][i], buy_price[s_type][i], sell_date[s_type][i], sell_price[s_type][i]]
except Exception,e:
if len(buy_price[s_type]) == len(sell_price[s_type]):
rate[s_type]['profit']['total'] = rate[s_type]['profit']['total'] * (end_date_open * (1 - 0.002) / sell_price[s_type][i])
else:
rate[s_type]['profit']['total'] = rate[s_type]['profit']['total'] * (end_date_open * (1 - 0.002) / buy_price[s_type][i])
rate[s_type]['profit'][date_this] = rate[s_type]['profit']['total']
rate[s_type]['trade'][date_this] = [buy_date[s_type][i], buy_price[s_type][i], 'lastday', end_date_open]
return sorted(rate.items(),key=lambda x:x[1]['profit']['total'],reverse=True)
def plot_profit(rate,s_type=''):
for code in rate:
best_strategy_code = rate[code][0][0]
rate_dic = dict(rate[code])
based_profit = pd.DataFrame(rate_dic['based']).drop('total',axis=0)
if s_type:
best_strategy_profit = pd.DataFrame(rate_dic[s_type]).fillna(method='pad').drop('total',axis=0)
best_strategy_code = s_type
else:
if rate[code][0][0] == 'based':
best_strategy_profit = pd.DataFrame(rate_dic[rate[code][1][0]]).fillna(method='pad').drop('total',axis=0)
else:
best_strategy_profit = pd.DataFrame(rate_dic[rate[code][0][0]]).fillna(method='pad').drop('total',axis=0)
profit_all = pd.concat([based_profit['profit'], best_strategy_profit['profit']], axis=1).fillna(method='pad')
profit_all.plot()
plt.legend(('based_profit', 'best_strategy_profit'), loc='upper left')
plt.title(code + '_' + best_strategy_code)
plt.savefig('/Users/NealSu/Downloads/profit_pic/' + code + '_' + best_strategy_code + '.jpg')
plt.close('all')
try:
print code
print best_strategy_profit['trade']
except:
pass
def strategy_choose(rate):
strategy_sum = {}
best_strategy = {}
for code in rate:
rate_dic = dict(rate[code])
best_strategy_code = rate[code][0][0]
if best_strategy_code not in best_strategy:
best_strategy[best_strategy_code] = 1
else:
best_strategy[best_strategy_code] = best_strategy[best_strategy_code] + 1
for s_type in rate_dic:
if s_type not in strategy_sum:
strategy_sum[s_type] = rate_dic[s_type]['profit']['total']
else:
strategy_sum[s_type] = strategy_sum[s_type] + rate_dic[s_type]['profit']['total']
best_strategy = sorted(best_strategy.items(),key=lambda x:x[1],reverse=True)
strategy_sum = sorted(strategy_sum.items(),key=lambda x:x[1],reverse=True)
return (best_strategy,strategy_sum)
def main():
codes = session.execute('select distinct(code) from day_k_data;').fetchall()
rate = {}
start_date = '2016-01-01'
date_delta = 60
codes = [code[0] for code in codes]
date_deltas = range(10,360,10)
start_dates = [date_add(start_date,i) for i in range(10,100,10)]
print start_dates
#open_market_dates = session.execute('select distinct(code),date from day_k_data;').fetchall()
for code in codes[:1]:
print code
try:
df = pd.read_sql('select * from day_k_data where code="'+code+'" order by date asc;',engine)
rate[code] = parse(df, start_date, date_delta)
except Exception,e:
print e,'line 212'
continue
best_strategy, strategy_sum = strategy_choose(rate)
plot_profit(rate,strategy_sum[0][0])
#plot_profit(rate)
print 'Best strategy:'
for elm in best_strategy:
print elm[0],elm[1]
print 'The Best strategy:'
print strategy_sum[0][0],strategy_sum[0][1]
if __name__ == '__main__':
main()
'''
pool = ThreadPool(2)
results = pool.map(parse, codes[:10])
pool.close()
pool.join()
print time.ctime()
for elm in results:
key = elm.keys()[0]
rate[key] = elm[key]
exit()
''' | gpl-2.0 |
kmike/scikit-learn | examples/covariance/plot_covariance_estimation.py | 4 | 4992 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
The usual estimator for covariance is the maximum likelihood estimator,
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A close formula proposed by Ledoit and Wolf to compute
the asymptotical optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is computational not costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly most
computationally costly.
"""
print(__doc__)
import numpy as np
import pylab as pl
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = pl.figure()
pl.title("Regularized covariance: likelihood and shrinkage coefficient")
pl.xlabel('Regularizaton parameter: shrinkage coefficient')
pl.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
pl.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
pl.plot(pl.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((pl.ylim()[1] - pl.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
pl.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
pl.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
pl.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
pl.ylim(ymin, ymax)
pl.xlim(xmin, xmax)
pl.legend()
pl.show()
| bsd-3-clause |
evanbiederstedt/RRBSfun | trees/chrom_scripts/normal_chr20.py | 1 | 25844 | import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50) # print all rows
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
normalB = glob.glob("binary_position_RRBS_normal_B_cell*")
mcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27mcell*")
pcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27pcell*")
cd19cell = glob.glob("binary_position_RRBS_NormalBCD19pcell*")
print(len(normalB))
print(len(mcell))
print(len(pcell))
print(len(cd19cell))
totalfiles = normalB + mcell + pcell + cd19cell
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
df = df[df["chromosome"] == "chr20"]
df = df.drop("chromosome", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)
total_matrix.columns = ["RRBS_normal_B_cell_A1_24_TAAGGCGA.ACAACC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACCGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACGTGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.AGGATG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATAGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATCGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CAAGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CATGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CGGTAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTATTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTCAGC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GACACG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GCTGCC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GGCATC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTGAGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTTGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TAGCGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TATCTC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TCTCTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACAACC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACCGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACTCAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ATAGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CAAGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CATGAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CCTTCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CGGTAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTATTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTCAGC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GACACG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GCATTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GGCATC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTGAGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTTGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TAGCGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TATCTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TCTCTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGCTGC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACAACC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACCGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACGTGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACTCAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.AGGATG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATAGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATCGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CAAGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CATGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CGGTAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CTATTG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GACACG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCATTC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCTGCC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GGCATC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTGAGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTTGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TAGCGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TATCTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACAACC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACCGCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACGTGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACTCAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.AGGATG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ATCGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CAAGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CATGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CCTTCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CGGTAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTATTG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTCAGC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GACACG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCATTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCTGCC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GGCATC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GTTGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TAGCGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TATCTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACAACC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACCGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACGTGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACTCAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.AGGATG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATAGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATCGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CAAGAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CATGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CGGTAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTATTG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTCAGC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GACACG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCATTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCTGCC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GGCATC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GTGAGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TAGCGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TATCTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACCGCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACGTGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACTCAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.AGGATG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ATCGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CAAGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CATGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CCTTCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTATTG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTCAGC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCATTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCTGCC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GGCATC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTGAGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTTGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.TCTCTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CATGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTATTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GACACG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCATTC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCTGCC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GGCATC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACCGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GACACG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GCATTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTTGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GACACG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACAACC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.AGGATG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATCGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CATGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CGGTAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTATTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTCAGC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCATTC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCTGCC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GGCATC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTTGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACAACC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACCGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACGTGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACTCAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.AGGATG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATAGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATCGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CAAGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CATGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CCTTCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CGGTAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTATTG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTCAGC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GACACG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCATTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCTGCC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GGCATC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTGAGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTTGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TATCTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TCTCTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CTATTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GACACG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACAACC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACCGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACGTGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACTCAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.AGGATG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATAGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATCGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CATGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CCTTCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CGGTAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTATTG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTCAGC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GACACG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCATTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCTGCC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GGCATC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTGAGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTTGAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TATCTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TCTCTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACCGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCATTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCTGCC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GGCATC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTTGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GACACG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCTGCC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACAACC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACCGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACGTGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACTCAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.AGGATG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATAGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATCGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CAAGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CATGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CCTTCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CGGTAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTATTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTCAGC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GACACG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCATTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCTGCC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GGCATC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTGAGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTTGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TAGCGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TATCTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TCTCTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACAACC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACCGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACGTGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACTCAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.AGGATG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATAGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATCGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CAAGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CATGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CCTTCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CGGTAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTATTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTCAGC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCATTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCTGCC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GGCATC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTGAGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTTGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TAGCGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TATCTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TCTCTG"]
print(total_matrix.shape)
# Encode each value as an integer and use '?' for missing entries
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else "?")
# Collapse every column into one character string per sample
total_matrix = total_matrix.astype(str).apply(''.join)
# Prefix each string with its name, separated by a space, giving
# PHYLIP-style "name sequence" rows, and write them out
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str), ' '))
tott.to_csv("normal_chrom20.phy", header=None, index=None)
print(tott.shape)
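# Added sketch (not part of the original script): the same reshaping steps on a
# tiny toy DataFrame, to show what the lines above produce. The _toy* names and
# values exist only for this illustration.
import pandas as pd
_toy = pd.DataFrame({"cellA": [1.0, 0.0, None], "cellB": [0.0, 1.0, 1.0]})
_toy = _toy.applymap(lambda x: int(x) if pd.notnull(x) else "?")  # NaN -> '?'
_toy = _toy.astype(str).apply(''.join)            # cellA -> '10?', cellB -> '011'
_toy_out = pd.Series(_toy.index.astype(str).str.cat(_toy.astype(str), ' '))
print(_toy_out)   # two PHYLIP-style rows: 'cellA 10?' and 'cellB 011'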
| mit |
UNR-AERIAL/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 85 | 8565 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) | bsd-3-clause |
btabibian/scikit-learn | sklearn/linear_model/tests/test_huber.py | 54 | 7619 | # Authors: Manoj Kumar mks542@nyu.edu
# License: BSD 3 clause
import numpy as np
from scipy import optimize, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_false
from sklearn.datasets import make_regression
from sklearn.linear_model import (
HuberRegressor, LinearRegression, SGDRegressor, Ridge)
from sklearn.linear_model.huber import _huber_loss_and_gradient
def make_regression_with_outliers(n_samples=50, n_features=20):
rng = np.random.RandomState(0)
# Generate data with outliers by replacing 10% of the samples with noise.
X, y = make_regression(
n_samples=n_samples, n_features=n_features,
random_state=0, noise=0.05)
# Replace 10% of the sample with noise.
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
return X, y
def test_huber_equals_lr_for_high_epsilon():
    # Test that HuberRegressor matches LinearRegression for large epsilon
X, y = make_regression_with_outliers()
lr = LinearRegression(fit_intercept=True)
lr.fit(X, y)
huber = HuberRegressor(fit_intercept=True, epsilon=1e3, alpha=0.0)
huber.fit(X, y)
assert_almost_equal(huber.coef_, lr.coef_, 3)
assert_almost_equal(huber.intercept_, lr.intercept_, 2)
def test_huber_gradient():
# Test that the gradient calculated by _huber_loss_and_gradient is correct
rng = np.random.RandomState(1)
X, y = make_regression_with_outliers()
sample_weight = rng.randint(1, 3, (y.shape[0]))
loss_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[0]
grad_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[1]
# Check using optimize.check_grad that the gradients are equal.
for _ in range(5):
# Check for both fit_intercept and otherwise.
for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
w = rng.randn(n_features)
w[-1] = np.abs(w[-1])
grad_same = optimize.check_grad(
loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight)
assert_almost_equal(grad_same, 1e-6, 4)
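# Added illustration (not part of the original test suite): optimize.check_grad
# returns the 2-norm of the difference between an analytic gradient and a
# finite-difference estimate, so a small value means the gradient is correct.
# Toy quadratic f(w) = w.dot(w), whose gradient is 2 * w; names are local here.
_toy_w = np.array([1.0, -2.0, 3.0])
_toy_err = optimize.check_grad(lambda w: np.dot(w, w), lambda w: 2 * w, _toy_w)
assert _toy_err < 1e-5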
def test_huber_sample_weights():
    # Test the sample_weight implementation in HuberRegressor
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True)
huber.fit(X, y)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
# Rescale coefs before comparing with assert_array_almost_equal to make sure
# that the number of decimal places used is somewhat insensitive to the
# amplitude of the coefficients and therefore to the scale of the data
# and the regularization parameter
scale = max(np.mean(np.abs(huber.coef_)),
np.mean(np.abs(huber.intercept_)))
huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale,
huber_intercept / scale)
X, y = make_regression_with_outliers(n_samples=5, n_features=20)
X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
huber.fit(X_new, y_new)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
sample_weight = np.ones(X.shape[0])
sample_weight[1] = 3
sample_weight[3] = 2
huber.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale,
huber_intercept / scale)
# Test sparse implementation with sample weights.
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True)
huber_sparse.fit(X_csr, y, sample_weight=sample_weight)
assert_array_almost_equal(huber_sparse.coef_ / scale,
huber_coef / scale)
def test_huber_sparse():
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.1)
huber.fit(X, y)
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
huber_sparse.fit(X_csr, y)
assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
assert_array_equal(huber.outliers_, huber_sparse.outliers_)
def test_huber_scaling_invariant():
"""Test that outliers filtering is scaling independent."""
rng = np.random.RandomState(0)
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100)
huber.fit(X, y)
n_outliers_mask_1 = huber.outliers_
assert_false(np.all(n_outliers_mask_1))
huber.fit(X, 2. * y)
n_outliers_mask_2 = huber.outliers_
assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
huber.fit(2. * X, 2. * y)
n_outliers_mask_3 = huber.outliers_
assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
def test_huber_and_sgd_same_results():
"""Test they should converge to same coefficients for same parameters"""
X, y = make_regression_with_outliers(n_samples=10, n_features=2)
# Fit once to find out the scale parameter. Scale down X and y by scale
# so that the scale parameter is optimized to 1.0
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
epsilon=1.35)
huber.fit(X, y)
X_scale = X / huber.scale_
y_scale = y / huber.scale_
huber.fit(X_scale, y_scale)
assert_almost_equal(huber.scale_, 1.0, 3)
sgdreg = SGDRegressor(
alpha=0.0, loss="huber", shuffle=True, random_state=0, n_iter=10000,
fit_intercept=False, epsilon=1.35)
sgdreg.fit(X_scale, y_scale)
assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
def test_huber_warm_start():
X, y = make_regression_with_outliers()
huber_warm = HuberRegressor(
fit_intercept=True, alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
huber_warm.fit(X, y)
huber_warm_coef = huber_warm.coef_.copy()
huber_warm.fit(X, y)
# SciPy performs the tol check after doing the coef updates, so
# these would be almost same but not equal.
assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
# No n_iter_ in old SciPy (<=0.9)
if huber_warm.n_iter_ is not None:
assert_equal(0, huber_warm.n_iter_)
def test_huber_better_r2_score():
    # Test that huber gives a better r2 score than ridge on the non-outliers
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.01, max_iter=100)
huber.fit(X, y)
linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
huber_score = huber.score(X[mask], y[mask])
huber_outlier_score = huber.score(X[~mask], y[~mask])
# The Ridge regressor should be influenced by the outliers and hence
# give a worse score on the non-outliers as compared to the huber regressor.
ridge = Ridge(fit_intercept=True, alpha=0.01)
ridge.fit(X, y)
ridge_score = ridge.score(X[mask], y[mask])
ridge_outlier_score = ridge.score(X[~mask], y[~mask])
assert_greater(huber_score, ridge_score)
# The huber model should also fit poorly on the outliers.
assert_greater(ridge_outlier_score, huber_outlier_score)
| bsd-3-clause |
kylerbrown/scikit-learn | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots show, first, what the K-means algorithm would yield using
three clusters. It is then shown what the effect of a bad
initialization is on the classification process: by setting n_init to
only 1 (the default is 10), the number of times the algorithm is run
with different centroid seeds is reduced.
The next plot shows the result of using eight clusters, and finally
the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
Erotemic/ibeis | ibeis/annots.py | 1 | 17911 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
import six
import itertools as it
from ibeis import _ibeis_object
from ibeis.control.controller_inject import make_ibs_register_decorator
(print, rrr, profile) = ut.inject2(__name__, '[annot]')
CLASS_INJECT_KEY, register_ibs_method = make_ibs_register_decorator(__name__)
BASE_TYPE = type
@register_ibs_method
def annots(ibs, aids=None, uuids=None, **kwargs):
""" Makes an Annots object """
if uuids is not None:
assert aids is None, 'specify one primary key'
aids = ibs.get_annot_aids_from_uuid(uuids)
if aids is None:
aids = ibs.get_valid_aids()
elif aids.__class__.__name__ == 'Annots':
return aids
aids = ut.ensure_iterable(aids)
return Annots(aids, ibs, **kwargs)
@register_ibs_method
def matches(ibs, ams=None, edges=None, uuid_edges=None, **kwargs):
""" Makes an Annots object """
if uuid_edges is not None:
assert ams is None, 'specify one primary key'
assert edges is None, 'specify one primary key'
uuids1, uuids2 = list(zip(*uuid_edges))
aids1 = ibs.get_annot_aids_from_uuid(uuids1)
aids2 = ibs.get_annot_aids_from_uuid(uuids2)
ams = ibs.get_annotmatch_rowid_from_undirected_superkey(aids1, aids2)
if edges is not None:
assert ams is None, 'specify one primary key'
assert uuid_edges is None, 'specify one primary key'
aids1, aids2 = list(zip(*edges))
ams = ibs.get_annotmatch_rowid_from_undirected_superkey(aids1, aids2)
if ams is None:
ams = ibs._get_all_annotmatch_rowids()
elif ams.__class__.__name__ == 'AnnotMatches':
return ams
ams = ut.ensure_iterable(ams)
return AnnotMatches(ams, ibs, **kwargs)
@register_ibs_method
def _annot_groups(ibs, aids_list=None, config=None):
annots_list = [ibs.annots(aids, config=config) for aids in aids_list]
return AnnotGroups(annots_list, ibs)
ANNOT_BASE_ATTRS = [
'aid',
'parent_aid',
'multiple',
'age_months_est_max', 'age_months_est_min', 'sex',
'sex_texts',
'uuids', 'hashid_uuid', 'visual_uuids', 'hashid_visual_uuid',
'semantic_uuids', 'hashid_semantic_uuid', 'verts', 'thetas',
'bboxes', 'bbox_area',
'species_uuids', 'species', 'species_rowids', 'species_texts',
'viewpoint_int', 'viewpoint_code',
'qualities', 'quality_texts', 'exemplar_flags',
    # DEPRECATE YAW
'yaw_texts', 'yaws', 'yaws_asfloat',
# Images
# 'image_rowids',
'gids', 'image_uuids',
'image_gps', 'image_gps2',
'image_unixtimes_asfloat',
'image_datetime_str', 'image_contributor_tag',
# Names
'nids', 'names', 'name_uuids',
# Inferred from context attrs
'contact_aids', 'num_contact_aids', 'groundfalse', 'groundtruth',
'num_groundtruth', 'has_groundtruth', 'otherimage_aids',
# Image Set
'imgset_uuids', 'imgsetids', 'image_set_texts',
# Occurrence / Encounter
'static_encounter',
'encounter_text', 'occurrence_text', 'primary_imageset',
# Tags
'all_tags',
'case_tags',
'annotmatch_tags', 'notes',
# Processing State
'reviewed', 'reviewed_matching_aids', 'has_reviewed_matching_aids',
'num_reviewed_matching_aids', 'detect_confidence',
]
ANNOT_SETTABLE_ATTRS = [
'age_months_est_max', 'age_months_est_min',
'bboxes', 'thetas', 'verts',
'qualities', 'quality_texts',
'viewpoint_int', 'viewpoint_code',
    # DEPRECATE YAW
'yaw_texts', 'yaws',
'sex', 'sex_texts', 'species',
'exemplar_flags',
'static_encounter',
'multiple',
'case_tags',
'detect_confidence', 'reviewed',
'name_texts', 'names',
'notes',
'parent_rowid',
]
class _AnnotPropInjector(BASE_TYPE):
"""
Ignore:
>>> from ibeis import _ibeis_object
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='testdb1')
>>> objname = 'annot'
>>> blacklist = ['annot_pair']
>>> _ibeis_object._find_ibeis_attrs(ibs, objname, blacklist)
"""
def __init__(metaself, name, bases, dct):
super(_AnnotPropInjector, metaself).__init__(name, bases, dct)
metaself.rrr = rrr
attrs = ANNOT_BASE_ATTRS
settable_attrs = ANNOT_SETTABLE_ATTRS
configurable_attrs = [
# Chip
'chip_dlensqrd', 'chip_fpath', 'chip_sizes', 'chip_thumbpath',
'chip_thumbtup', 'chips',
# Feat / FeatWeight / Kpts / Desc
'feat_rowids', 'num_feats', 'featweight_rowids', 'fgweights',
'fgweights_subset', 'kpts', 'kpts_distinctiveness', 'vecs',
'vecs_cache', 'vecs_subset',
]
#misc = [
# 'gar_rowids', 'alrids', 'alrids_oftype', 'lblannot_rowids',
# 'lblannot_rowids_oftype', 'lblannot_value_of_lbltype', 'rows',
# 'instancelist', 'lazy_dict', 'lazy_dict2', 'missing_uuid',
# 'been_adjusted', 'class_labels',
#]
#extra_attrs = [
# # Age / Sex
# 'age_months_est', 'age_months_est_max', 'age_months_est_max_texts',
# 'age_months_est_min', 'age_months_est_min_texts',
# 'age_months_est_texts', 'sex', 'sex_texts',
# # Stats
# 'stats_dict', 'per_name_stats', 'qual_stats', 'info', 'yaw_stats',
# 'intermediate_viewpoint_stats',
#]
#inverse_attrs = [
# # External lookups via superkeys
# 'aids_from_semantic_uuid',
# 'aids_from_uuid',
# 'aids_from_visual_uuid',
# 'rowids_from_partial_vuuids',
#]
depcache_attrs = [
('hog', 'hog'),
('probchip', 'img'),
]
aliased_attrs = {
'time': 'image_unixtimes_asfloat',
'gps': 'image_gps2',
'chip_size': 'chip_sizes',
'yaw': 'yaws_asfloat',
'qual': 'qualities',
'name': 'names',
'nid': 'nids',
'unary_tags': 'case_tags',
            # DEPRECATE
'rchip': 'chips',
'rchip_fpath': 'chip_fpath',
}
objname = 'annot'
_ibeis_object._inject_getter_attrs(metaself, objname, attrs,
configurable_attrs, 'depc_annot',
depcache_attrs, settable_attrs,
aliased_attrs)
# TODO: incorporate dynamic setters
#def set_case_tags(self, tags):
# self._ibs.append_annot_case_tags(self._rowids, tags)
#fget = metaself.case_tags.fget
#fset = set_case_tags
#setattr(metaself, 'case_tags', property(fget, fset))
try:
from ibeis import _autogen_annot_base
BASE = _autogen_annot_base._annot_base_class
except ImportError:
BASE = _ibeis_object.ObjectList1D
# @ut.reloadable_class
@six.add_metaclass(_AnnotPropInjector)
class Annots(BASE):
"""
Represents a group of annotations. Efficiently accesses properties from a
database using lazy evaluation.
CommandLine:
python -m ibeis.annots Annots
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.annots import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='testdb1')
>>> aids = ibs.get_valid_aids()
>>> a = self = annots = Annots(aids, ibs)
>>> a.preload('vecs', 'kpts', 'nids')
>>> print(Annots.mro())
>>> print(ut.depth_profile(a.vecs))
>>> print(a)
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.annots import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='testdb1')
>>> aids = ibs.get_valid_aids()
>>> a = self = annots = Annots(aids, ibs)
>>> a.preload('vecs', 'kpts', 'nids')
>>> a.disconnect()
>>> assert 'vecs' in a._internal_attrs.keys()
>>> assert a._ibs is None
>>> ut.assert_raises(KeyError, a._get_num_feats)
>>> a._ibs = ibs
>>> assert len(a._get_num_feats()) > 0
"""
#def __init__(self, aids, ibs, config=None, caching=False):
# super(Annots, self).__init__(aids, ibs, config, caching)
@property
def aids(self):
return self._rowids
def get_stats(self, **kwargs):
self._ibs.get_annot_stats_dict(self.aids, **kwargs)
def print_stats(self, **kwargs):
self._ibs.print_annot_stats(self.aids, **kwargs)
#@property
def get_speeds(self):
#import vtool_ibeis as vt
edges = self.get_aidpairs()
speeds = self._ibs.get_annotpair_speeds(edges)
#edges = vt.pdist_indicies(len(annots))
#speeds = self._ibs.get_unflat_annots_speeds_list([self.aids])[0]
edge_to_speed = dict(zip(edges, speeds))
return edge_to_speed
def get_name_image_closure(self):
ibs = self._ibs
aids = self.aids
old_aids = []
while len(old_aids) != len(aids):
old_aids = aids
gids = ut.unique(ibs.get_annot_gids(aids))
other_aids = list(set(ut.flatten(ibs.get_image_aids(gids))))
other_nids = list(set(ibs.get_annot_nids(other_aids)))
aids = ut.flatten(ibs.get_name_aids(other_nids))
return aids
def group2(self, by):
"""
self = annots
by = annots.static_encounter
encounters = annots.group2(annots.static_encounter)
"""
annots_list = self.group(by)[1]
return AnnotGroups(annots_list, self._ibs)
def get_aidpairs(self):
aids = self.aids
aid_pairs = list(it.combinations(aids, 2))
return aid_pairs
def get_am_rowids(self, internal=True):
"""
        If `internal` is True, returns am rowids only between
        annotations in this Annots object; otherwise returns
        any am rowid that contains any aid in this Annots object.
"""
ibs = self._ibs
if internal:
ams = ibs.get_annotmatch_rowids_between(self.aids, self.aids)
else:
ams = ut.flatten(ibs.get_annotmatch_rowids_from_aid(self.aids))
return ams
def matches(self, internal=True):
ams = self.get_am_rowids(internal)
return self._ibs.matches(ams)
def get_am_rowids_and_pairs(self):
ibs = self._ibs
ams = self.get_am_rowids()
aid_pairs = ibs.get_annotmatch_aids(ams)
# aid_pairs = self.get_aidpairs()
# aids1 = ut.take_column(aid_pairs, 0)
# aids2 = ut.take_column(aid_pairs, 1)
# ams = ibs.get_annotmatch_rowid_from_undirected_superkey(aids1, aids2)
# flags = ut.not_list(ut.flag_None_items(ams))
# ams = ut.compress(ams, flags)
# aid_pairs = ut.compress(aid_pairs, flags)
return ams, aid_pairs
def get_am_aidpairs(self):
ibs = self._ibs
ams = self.get_am_rowids()
aids1 = ibs.get_annotmatch_aid1(ams)
aids2 = ibs.get_annotmatch_aid2(ams)
aid_pairs = list(zip(aids1, aids2))
return aid_pairs
@property
def hog_img(self):
from ibeis import core_annots
return [core_annots.make_hog_block_image(hog) for hog in self.hog_hog]
def append_tags(self, tags):
self._ibs.append_annot_case_tags(self._rowids, tags)
def remove_tags(self, tags):
self._ibs.remove_annot_case_tags(self._rowids, tags)
def __hash__(self):
return hash(tuple(self.aids))
def __lt__(self, other):
if len(self.aids) == len(other.aids):
if len(self.aids) == 0:
return False
else:
return tuple(self) < tuple(other)
elif len(self.aids) < len(other.aids):
return True
else:
return False
def __eq__(self, other):
if len(self.aids) == len(other.aids):
return all(a == b for a, b in zip(self, other))
return False
def show(self, *args, **kwargs):
if len(self) != 1:
raise ValueError('Can only show one, got {}'.format(len(self)))
from ibeis.viz import viz_chip
for aid in self:
return viz_chip.show_chip(self._ibs, aid, *args, **kwargs)
class _AnnotGroupPropInjector(BASE_TYPE):
def __init__(metaself, name, bases, dct):
super(_AnnotGroupPropInjector, metaself).__init__(name, bases, dct)
metaself.rrr = rrr
# TODO: move to ibeis object as a group call
def _make_unflat_getter(objname, attrname):
ibs_funcname = 'get_%s_%s' % (objname, attrname)
def ibs_unflat_getter(self, *args, **kwargs):
ibs_callable = getattr(self._ibs, ibs_funcname)
rowids = self._rowids_list
ibs = self._ibs
return ibs.unflat_map(ibs_callable, rowids, *args, **kwargs)
ut.set_funcname(ibs_unflat_getter, 'unflat_' + ibs_funcname)
return ibs_unflat_getter
for attrname in ANNOT_BASE_ATTRS:
if hasattr(metaself, attrname):
print('Cannot inject annot group attrname = %r' % (attrname,))
continue
ibs_unflat_getter = _make_unflat_getter('annot', attrname)
setattr(metaself, '_unflat_get_' + attrname, ibs_unflat_getter)
setattr(metaself, attrname, property(ibs_unflat_getter))
@ut.reloadable_class
@six.add_metaclass(_AnnotGroupPropInjector)
class AnnotGroups(ut.NiceRepr):
""" Effciently handle operations on multiple groups of annotations """
def __init__(self, annots_list, ibs):
self._ibs = ibs
self.annots_list = annots_list
self._rowids_list = [a._rowids for a in self.annots_list]
def __len__(self):
return len(self.annots_list)
def __nice__(self):
import numpy as np
len_list = ut.lmap(len, self.annots_list)
num = len(self.annots_list)
mean = np.mean(len_list)
std = np.std(len_list)
if six.PY3:
nice = '(n=%r, μ=%.1f, σ=%.1f)' % (num, mean, std)
else:
nice = '(n=%r, m=%.1f, s=%.1f)' % (num, mean, std)
return nice
def __iter__(self):
return iter(self.annots_list)
def __getitem__(self, index):
return self.annots_list[index]
@property
def aids(self):
return [a.aids for a in self.annots_list]
@property
def images(self, config=None):
return self._ibs.images(self.gids, config)
@property
def match_tags(self):
""" returns pairwise tags within the annotation group """
ams_list = self._ibs.get_unflat_am_rowids(self.aids)
tags = self._ibs.unflat_map(self._ibs.get_annotmatch_case_tags, ams_list)
return tags
class _AnnotMatchPropInjector(BASE_TYPE):
"""
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis import _ibeis_object
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='testdb1')
>>> objname = 'annotmatch'
>>> blacklist = []
>>> tup = _ibeis_object._find_ibeis_attrs(ibs, objname, blacklist)
>>> attrs, settable_attrs = tup
>>> print('attrs = ' + ut.repr4(attrs))
>>> print('settable_attrs = ' + ut.repr4(settable_attrs))
"""
def __init__(metaself, name, bases, dct):
super(_AnnotMatchPropInjector, metaself).__init__(name, bases, dct)
metaself.rrr = rrr
attrs = [
'aid1', 'aid2', 'confidence', 'count', 'evidence_decision',
'meta_decision', 'posixtime_modified', 'reviewer', 'tag_text',
'case_tags',
]
settable_attrs = [
'confidence', 'count', 'evidence_decision', 'meta_decision',
'posixtime_modified', 'reviewer', 'tag_text',
]
configurable_attrs = []
depcache_attrs = []
aliased_attrs = {}
objname = 'annotmatch'
_ibeis_object._inject_getter_attrs(metaself, objname, attrs,
configurable_attrs, None,
depcache_attrs, settable_attrs,
aliased_attrs)
@six.add_metaclass(_AnnotMatchPropInjector)
class AnnotMatches(BASE):
"""
    Represents a group of annotation matches. Efficiently accesses properties
    from a database using lazy evaluation.
CommandLine:
python -m ibeis.annots Annots
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.annots import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='testdb1')
>>> aids = ibs.get_valid_aids()
>>> annots = Annots(aids, ibs)
>>> ams = annots.get_am_rowids()
>>> matches = self = ibs.matches()
>>> ed1 = matches.evidence_decision
>>> md2 = matches.meta_decision
>>> table = ibs.db.get_table_as_pandas('annotmatch')
>>> assert len(table) == len(matches)
"""
@property
def edges(self):
return list(zip(self.aid1, self.aid2))
@property
def confidence_code(self):
INT_TO_CODE = self._ibs.const.CONFIDENCE.INT_TO_CODE
return [INT_TO_CODE[c] for c in self.confidence]
@property
def meta_decision_code(self):
INT_TO_CODE = self._ibs.const.META_DECISION.INT_TO_CODE
return [INT_TO_CODE[c] for c in self.meta_decision]
@property
def evidence_decision_code(self):
INT_TO_CODE = self._ibs.const.EVIDENCE_DECISION.INT_TO_CODE
return [INT_TO_CODE[c] for c in self.evidence_decision]
if __name__ == '__main__':
r"""
CommandLine:
python -m ibeis.annot
python -m ibeis.annot --allexamples
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| apache-2.0 |
janusnic/21v-python | unit_20/matplotlib/custom_cmap.py | 2 | 5759 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
"""
Example: suppose you want red to increase from 0 to 1 over the bottom
half, green to do the same over the middle half, and blue over the top
half. Then you would use:
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0))}
If, as in this example, there are no discontinuities in the r, g, and b
components, then it is quite simple: the second and third element of
each tuple, above, is the same--call it "y". The first element ("x")
defines interpolation intervals over the full range of 0 to 1, and it
must span that whole range. In other words, the values of x divide the
0-to-1 range into a set of segments, and y gives the end-point color
values for each segment.
Now consider the green. cdict['green'] is saying that for
0 <= x <= 0.25, y is zero; no green.
0.25 < x <= 0.75, y varies linearly from 0 to 1.
x > 0.75, y remains at 1, full green.
If there are discontinuities, then it is a little more complicated.
Label the 3 elements in each row in the cdict entry for a given color as
(x, y0, y1). Then for values of x between x[i] and x[i+1] the color
value is interpolated between y1[i] and y0[i+1].
Going back to the cookbook example, look at cdict['red']; because y0 !=
y1, it is saying that for x from 0 to 0.5, red increases from 0 to 1,
but then it jumps down, so that for x from 0.5 to 1, red increases from
0.7 to 1. Green ramps from 0 to 1 as x goes from 0 to 0.5, then jumps
back to 0, and ramps back to 1 as x goes from 0.5 to 1.
row i:    x  y0  y1
                 /
                /
row i+1:  x  y0  y1
Above is an attempt to show that for x in the range x[i] to x[i+1], the
interpolation is between y1[i] and y0[i+1]. So, y0[0] and y1[-1] are
never used.
"""
cdict1 = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
cdict2 = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 1.0),
(1.0, 0.1, 1.0)),
'green': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.1),
(0.5, 1.0, 0.0),
(1.0, 0.0, 0.0))
}
cdict3 = {'red': ((0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.5, 0.8, 1.0),
(0.75, 1.0, 1.0),
(1.0, 0.4, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.5, 0.9, 0.9),
(0.75, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.4),
(0.25, 1.0, 1.0),
(0.5, 1.0, 0.8),
(0.75, 0.0, 0.0),
(1.0, 0.0, 0.0))
}
# Make a modified version of cdict3 with some transparency
# in the middle of the range.
cdict4 = cdict3.copy()
cdict4['alpha'] = ((0.0, 1.0, 1.0),
# (0.25,1.0, 1.0),
(0.5, 0.3, 0.3),
# (0.75,1.0, 1.0),
(1.0, 1.0, 1.0))
# Now we will use this example to illustrate 3 ways of
# handling custom colormaps.
# First, the most direct and explicit:
blue_red1 = LinearSegmentedColormap('BlueRed1', cdict1)
# Second, create the map explicitly and register it.
# Like the first method, this method works with any kind
# of Colormap, not just
# a LinearSegmentedColormap:
blue_red2 = LinearSegmentedColormap('BlueRed2', cdict2)
plt.register_cmap(cmap=blue_red2)
# Third, for LinearSegmentedColormap only,
# leave everything to register_cmap:
plt.register_cmap(name='BlueRed3', data=cdict3) # optional lut kwarg
plt.register_cmap(name='BlueRedAlpha', data=cdict4)
# Make some illustrative fake data:
x = np.arange(0, np.pi, 0.1)
y = np.arange(0, 2*np.pi, 0.1)
X, Y = np.meshgrid(x, y)
Z = np.cos(X) * np.sin(Y) * 10
# Make the figure:
plt.figure(figsize=(6, 9))
plt.subplots_adjust(left=0.02, bottom=0.06, right=0.95, top=0.94, wspace=0.05)
# Make 4 subplots:
plt.subplot(2, 2, 1)
plt.imshow(Z, interpolation='nearest', cmap=blue_red1)
plt.colorbar()
plt.subplot(2, 2, 2)
cmap = plt.get_cmap('BlueRed2')
plt.imshow(Z, interpolation='nearest', cmap=cmap)
plt.colorbar()
# Now we will set the third cmap as the default. One would
# not normally do this in the middle of a script like this;
# it is done here just to illustrate the method.
plt.rcParams['image.cmap'] = 'BlueRed3'
plt.subplot(2, 2, 3)
plt.imshow(Z, interpolation='nearest')
plt.colorbar()
plt.title("Alpha = 1")
# Or as yet another variation, we can replace the rcParams
# specification *before* the imshow with the following *after*
# imshow.
# This sets the new default *and* sets the colormap of the last
# image-like item plotted via pyplot, if any.
#
plt.subplot(2, 2, 4)
# Draw a line with low zorder so it will be behind the image.
plt.plot([0, 10*np.pi], [0, 20*np.pi], color='c', lw=20, zorder=-1)
plt.imshow(Z, interpolation='nearest')
plt.colorbar()
# Here it is: changing the colormap for the current image and its
# colorbar after they have been plotted.
plt.set_cmap('BlueRedAlpha')
plt.title("Varying alpha")
#
plt.suptitle('Custom Blue-Red colormaps', fontsize=16)
plt.show()
| mit |
EmmaIshta/QUANTAXIS | QUANTAXIS/QASU/update_tdx.py | 1 | 2553 | import datetime
from QUANTAXIS.QAFetch.QATdx import (QA_fetch_get_stock_day,
QA_fetch_get_stock_min,
QA_fetch_get_stock_transaction,
QA_fetch_get_stock_xdxr)
from QUANTAXIS.QAFetch.QATushare import QA_fetch_get_stock_time_to_market
from QUANTAXIS.QASU.save_tdx import QA_SU_save_stock_xdxr
from QUANTAXIS.QAUtil import (QA_Setting, QA_util_log_info,
QA_util_to_json_from_pandas, trade_date_sse)
"""
This module is deprecated.
Incremental updates are now handled in save_tdx.
"""
def QA_SU_update_stock_day(client=QA_Setting.client):
def save_stock_day(code, start, end, coll):
QA_util_log_info('##JOB01 Now Updating STOCK_DAY==== %s' % (str(code)))
data = QA_util_to_json_from_pandas(
QA_fetch_get_stock_day(str(code), start, end, '00'))
if len(data) > 0:
coll.insert_many(data)
else:
pass
coll_stock_day = client.quantaxis.stock_day
for item in QA_fetch_get_stock_time_to_market().index:
if coll_stock_day.find({'code': str(item)[0:6]}).count() > 0:
            # This check is needed because a newly listed stock has no data in the database yet, which would otherwise cause a negative-index error
start_date = str(coll_stock_day.find({'code': str(item)[0:6]})[
coll_stock_day.find({'code': str(item)[0:6]}).count() - 1]['date'])
print('*' * 20)
end_date = str(now_time())[0:10]
start_date = trade_date_sse[trade_date_sse.index(
start_date) + 1]
QA_util_log_info(' UPDATE_STOCK_DAY \n Trying updating %s from %s to %s' %
(item, start_date, end_date))
save_stock_day(item, start_date, end_date, coll_stock_day)
else:
save_stock_day(item, '1990-01-01',
str(now_time())[0:10], coll_stock_day)
QA_util_log_info('Done == \n')
def QA_SU_update_stock_xdxr(client=QA_Setting.client):
client.quantaxis.drop_collection('stock_xdxr')
QA_SU_save_stock_xdxr()
def QA_SU_update_stock_min(client=QA_Setting.client):
"""
    stock_min is split into three collections, distinguished by type:
    1. the 1min_level collection
    2. the 5min_level collection
    3. the 15min_level collection
"""
def QA_SU_update_index_day(client=QA_Setting.client):
pass
def QA_SU_update_index_min(client=QA_Setting.client):
pass
if __name__ == '__main__':
QA_SU_update_stock_day()
# QA_SU_update_stock_xdxr()
| mit |
herilalaina/scikit-learn | sklearn/cluster/birch.py | 18 | 23684 | # Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, instead of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
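# Added sketch (not part of the original module): _iterate_sparse_X yields one
# dense 1-D row per CSR row, matching X.toarray() row by row. The _demo* names
# are local to this illustration; `sparse` and `np` come from the imports above.
_demo_X = sparse.random(4, 6, density=0.3, format='csr', random_state=0)
for _demo_row, _demo_dense in zip(_iterate_sparse_X(_demo_X), _demo_X.toarray()):
    assert np.allclose(_demo_row, _demo_dense)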
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
the centroids are just a view of the ``init_centroids_`` .
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
# Keep centroids and squared norm as views. In this way
# if we change init_centroids and init_sq_norm_, it is
# sufficient,
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
# things not too good. we need to redistribute the subclusters in
# our child node, and add a new subcluster in the parent
# subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode has its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
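# Added sketch (not part of the original module): the radius property above is
# the root-mean-square distance of the subcluster's samples to their centroid,
# i.e. sqrt(mean(||x||^2) - 2*c.dot(mean(x)) + ||c||^2). A quick numeric check
# using only _CFSubcluster and numpy; the _demo* names are local to this sketch.
_demo_pts = np.random.RandomState(0).randn(5, 3)
_demo_sub = _CFSubcluster(linear_sum=_demo_pts[0].copy())
for _demo_p in _demo_pts[1:]:
    _demo_sub.update(_CFSubcluster(linear_sum=_demo_p))
assert np.isclose(_demo_sub.radius,
                  np.sqrt(((_demo_pts - _demo_pts.mean(axis=0)) ** 2).sum(axis=1).mean()))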
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
It is a memory-efficient, online-learning algorithm provided as an
alternative to :class:`MiniBatchKMeans`. It constructs a tree
data structure with the cluster centroids being read off the leaf.
These can be either the final cluster centroids or can be provided as input
to another clustering algorithm such as :class:`AgglomerativeClustering`.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be less than the threshold. Otherwise a new
subcluster is started. Setting this value to be very low promotes
splitting and vice-versa.
branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor, then
that node is split into two nodes with the subclusters redistributed
in each. The parent subcluster of that node is removed and two new
subclusters are added as parents of the 2 split nodes.
n_clusters : int, instance of sklearn.cluster model, default 3
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples.
- `None` : the final clustering step is not performed and the
subclusters are returned as they are.
- `sklearn.cluster` Estimator : If a model is provided, the model is
fit treating the subclusters as new samples and the initial data is
mapped to the label of the closest subcluster.
- `int` : the model fit is :class:`AgglomerativeClustering` with
`n_clusters` set to be equal to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/archive/p/jbirch
Notes
-----
The tree data structure consists of nodes with each node consisting of
a number of subclusters. The maximum number of subclusters in a node
is determined by the branching factor. Each subcluster maintains a
linear sum, squared sum and the number of samples in that subcluster.
In addition, each subcluster can also have a node as its child, if the
subcluster is not a member of a leaf node.
For a new point entering the root, it is merged with the subcluster closest
to it and the linear sum, squared sum and the number of samples of that
subcluster are updated. This is done recursively till the properties of
the leaf node are updated.
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
y : Ignored
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize this loop; a good candidate for moving to Cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves : array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
y : Ignored
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels : ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
| bsd-3-clause |
xya/sms-tools | software/models_interface/dftModel_function.py | 21 | 2413 | # function to call the main analysis/synthesis functions in software/models/dftModel.py
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
import dftModel as DFT
def main(inputFile = '../../sounds/piano.wav', window = 'blackman', M = 511, N = 1024, time = .2):
"""
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (choice of rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size (odd integer value)
    N: fft size (power of two, greater than or equal to M)
time: time to start analysis (in seconds)
"""
# read input sound (monophonic with sampling rate of 44100)
fs, x = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# get a fragment of the input sound of size M
sample = int(time*fs)
if (sample+M >= x.size or sample < 0): # raise error if time outside of sound
raise ValueError("Time outside sound boundaries")
x1 = x[sample:sample+M]
# compute the dft of the sound fragment
mX, pX = DFT.dftAnal(x1, w, N)
# compute the inverse dft of the spectrum
y = DFT.dftSynth(mX, pX, w.size)*sum(w)
# create figure
plt.figure(figsize=(12, 9))
# plot the sound fragment
plt.subplot(4,1,1)
plt.plot(time + np.arange(M)/float(fs), x1)
plt.axis([time, time + M/float(fs), min(x1), max(x1)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot the magnitude spectrum
plt.subplot(4,1,2)
plt.plot(float(fs)*np.arange(mX.size)/float(N), mX, 'r')
plt.axis([0, fs/2.0, min(mX), max(mX)])
plt.title ('magnitude spectrum: mX')
plt.ylabel('amplitude (dB)')
plt.xlabel('frequency (Hz)')
# plot the phase spectrum
plt.subplot(4,1,3)
plt.plot(float(fs)*np.arange(pX.size)/float(N), pX, 'c')
plt.axis([0, fs/2.0, min(pX), max(pX)])
plt.title ('phase spectrum: pX')
plt.ylabel('phase (radians)')
plt.xlabel('frequency (Hz)')
# plot the sound resulting from the inverse dft
plt.subplot(4,1,4)
plt.plot(time + np.arange(M)/float(fs), y)
plt.axis([time, time + M/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
main()
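# Hypothetical usage sketch: the same analysis/synthesis driven programmatically with a
# different window and FFT size. The parameter values below are illustrative assumptions,
# not sms-tools defaults; wrapping them in a function keeps them from running on import.
def example_alternative_analysis():
    """Run main() with a Hamming window, a longer window and a larger FFT size."""
    main(inputFile='../../sounds/piano.wav', window='hamming', M=801, N=2048, time=0.5)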
| agpl-3.0 |
MadsJensen/CAA | time_decoding_sensor-grad_ent.py | 1 | 1847 | import sys
import mne
from mne.decoding import GeneralizationAcrossTime
from sklearn.externals import joblib
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from my_settings import (data_path, epochs_folder)
import matplotlib
matplotlib.use('Agg')
subject = sys.argv[1]
# Load epochs from both conditions
epochs = mne.read_epochs(
epochs_folder + "%s_trial_start-epo.fif" % subject, preload=True)
# Crop and downsample to make it faster
epochs.crop(None, tmax=1)
epochs.pick_types(meg="grad")
epochs_ent_left = epochs["ent/left"].copy()
epochs_ent_right = epochs["ent/right"].copy()
del epochs
epochs_ent_left.events[:, 2] = 0
epochs_ent_right.events[:, 2] = 1
epochs_ent_left.event_id = {"0": 0}
epochs_ent_right.event_id = {"1": 1}
epochs_data = mne.concatenate_epochs([epochs_ent_left, epochs_ent_right])
# Equalise channels and epochs, and concatenate epochs
epochs_data.equalize_event_counts(["0", "1"])
# Classifier
clf = make_pipeline(StandardScaler(), LogisticRegression(C=1))
# Setup the y vector and GAT
gat = GeneralizationAcrossTime(
predict_mode='mean-prediction', scorer="roc_auc", n_jobs=1)
# Fit model
print("fitting GAT")
gat.fit(epochs_data)
# Scoring
print("Scoring GAT")
gat.score(epochs_data)
# Save model
joblib.dump(
gat, data_path + "decode_time_gen/%s_gat_allsensor-grad_ent.jl" % subject)
# make matrix plot and save it
fig = gat.plot(cmap="viridis", title="Temporal Gen for subject: %s" % subject)
fig.savefig(data_path + "decode_time_gen/%s_gat_matrix_allsensor-grad_ent.png"
% subject)
fig = gat.plot_diagonal(
chance=0.5, title="Temporal Gen for subject: %s" % subject)
fig.savefig(data_path +
"decode_time_gen/%s_gat_diagonal_allsensor-grad_ent.png" % subject)
| bsd-3-clause |
bert9bert/statsmodels | statsmodels/sandbox/distributions/examples/ex_mvelliptical.py | 34 | 5169 | # -*- coding: utf-8 -*-
"""examples for multivariate normal and t distributions
Created on Fri Jun 03 16:00:26 2011
@author: josef
for comparison I used R mvtnorm version 0.9-96
"""
from __future__ import print_function
import numpy as np
import statsmodels.sandbox.distributions.mv_normal as mvd
from numpy.testing import assert_array_almost_equal
cov3 = np.array([[ 1. , 0.5 , 0.75],
[ 0.5 , 1.5 , 0.6 ],
[ 0.75, 0.6 , 2. ]])
mu = np.array([-1, 0.0, 2.0])
#************** multivariate normal distribution ***************
mvn3 = mvd.MVNormal(mu, cov3)
#compare with random sample
x = mvn3.rvs(size=1000000)
xli = [[2., 1., 1.5],
[0., 2., 1.5],
[1.5, 1., 2.5],
[0., 1., 1.5]]
xliarr = np.asarray(xli).T[None,:, :]
#from R session
#pmvnorm(lower=-Inf,upper=(x[0,.]-mu)/sqrt(diag(cov3)),mean=rep(0,3),corr3)
r_cdf = [0.3222292, 0.3414643, 0.5450594, 0.3116296]
r_cdf_errors = [1.715116e-05, 1.590284e-05, 5.356471e-05, 3.567548e-05]
n_cdf = [mvn3.cdf(a) for a in xli]
assert_array_almost_equal(r_cdf, n_cdf, decimal=4)
print(n_cdf)
print('')
print((x<np.array(xli[0])).all(-1).mean(0))
print((x[...,None]<xliarr).all(1).mean(0))
print(mvn3.expect_mc(lambda x: (x<xli[0]).all(-1), size=100000))
print(mvn3.expect_mc(lambda x: (x[...,None]<xliarr).all(1), size=100000))
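#small helper sketch (not in the original example): the empirical CDF pattern used in the
#prints above, i.e. the fraction of Monte Carlo draws with every component below `point`
def empirical_cdf(sample, point):
    """Monte Carlo estimate of P(X < point), with componentwise strict inequality."""
    return (sample < np.asarray(point)).all(-1).mean(0)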
#other methods
mvn3n = mvn3.normalized()
assert_array_almost_equal(mvn3n.cov, mvn3n.corr, decimal=15)
assert_array_almost_equal(mvn3n.mean, np.zeros(3), decimal=15)
xn = mvn3.normalize(x)
xn_cov = np.cov(xn, rowvar=0)
assert_array_almost_equal(mvn3n.cov, xn_cov, decimal=2)
assert_array_almost_equal(np.zeros(3), xn.mean(0), decimal=2)
mvn3n2 = mvn3.normalized2()
assert_array_almost_equal(mvn3n.cov, mvn3n2.cov, decimal=2)
#mistake: "normalized2" standardizes - FIXED
#assert_array_almost_equal(np.eye(3), mvn3n2.cov, decimal=2)
xs = mvn3.standardize(x)
xs_cov = np.cov(xn, rowvar=0)
#another mixup xs is normalized
#assert_array_almost_equal(np.eye(3), xs_cov, decimal=2)
assert_array_almost_equal(mvn3.corr, xs_cov, decimal=2)
assert_array_almost_equal(np.zeros(3), xs.mean(0), decimal=2)
mv2m = mvn3.marginal(np.array([0,1]))
print(mv2m.mean)
print(mv2m.cov)
mv2c = mvn3.conditional(np.array([0,1]), [0])
print(mv2c.mean)
print(mv2c.cov)
mv2c = mvn3.conditional(np.array([0]), [0, 0])
print(mv2c.mean)
print(mv2c.cov)
import statsmodels.api as sm
mod = sm.OLS(x[:,0], sm.add_constant(x[:,1:], prepend=True))
res = mod.fit()
print(res.model.predict(np.array([1,0,0])))
mv2c = mvn3.conditional(np.array([0]), [0, 0])
print(mv2c.mean)
mv2c = mvn3.conditional(np.array([0]), [1, 1])
print(res.model.predict(np.array([1,1,1])))
print(mv2c.mean)
#the following wrong input doesn't raise an exception but produces wrong numbers
#mv2c = mvn3.conditional(np.array([0]), [[1, 1],[2,2]])
#************** multivariate t distribution ***************
mvt3 = mvd.MVT(mu, cov3, 4)
xt = mvt3.rvs(size=100000)
assert_array_almost_equal(mvt3.cov, np.cov(xt, rowvar=0), decimal=1)
mvt3s = mvt3.standardized()
mvt3n = mvt3.normalized()
#the following should be equal or correct up to numerical precision of float
assert_array_almost_equal(mvt3.corr, mvt3n.sigma, decimal=15)
assert_array_almost_equal(mvt3n.corr, mvt3n.sigma, decimal=15)
assert_array_almost_equal(np.eye(3), mvt3s.sigma, decimal=15)
xts = mvt3.standardize(xt)
xts_cov = np.cov(xts, rowvar=0)
xtn = mvt3.normalize(xt)
xtn_cov = np.cov(xtn, rowvar=0)
xtn_corr = np.corrcoef(xtn, rowvar=0)
assert_array_almost_equal(mvt3n.mean, xtn.mean(0), decimal=2)
#the following might fail sometimes (random test), add seed in tests
assert_array_almost_equal(mvt3n.corr, xtn_corr, decimal=1)
#watch out cov is not the same as sigma for t distribution, what's right here?
#normalize by sigma or by cov ? now normalized by sigma
assert_array_almost_equal(mvt3n.cov, xtn_cov, decimal=1)
assert_array_almost_equal(mvt3s.cov, xts_cov, decimal=1)
a = [0.0, 1.0, 1.5]
mvt3_cdf0 = mvt3.cdf(a)
print(mvt3_cdf0)
print((xt<np.array(a)).all(-1).mean(0))
print('R', 0.3026741) # "error": 0.0004832187
print('R', 0.3026855) # error 3.444375e-06 with smaller abseps
print('diff', mvt3_cdf0 - 0.3026855)
a = [0.0, 0.5, 1.0]
mvt3_cdf1 = mvt3.cdf(a)
print(mvt3_cdf1)
print((xt<np.array(a)).all(-1).mean(0))
print('R', 0.1946621) # "error": 0.0002524817)
print('R', 0.1946217) # "error:"2.748699e-06 with smaller abseps)
print('diff', mvt3_cdf1 - 0.1946217)
assert_array_almost_equal(mvt3_cdf0, 0.3026855, decimal=5)
assert_array_almost_equal(mvt3_cdf1, 0.1946217, decimal=5)
import statsmodels.distributions.mixture_rvs as mix
mu2 = np.array([4, 2.0, 2.0])
mvn32 = mvd.MVNormal(mu2, cov3/2., 4)
md = mix.mv_mixture_rvs([0.4, 0.6], 5, [mvt3, mvt3n], 3)
rvs = mix.mv_mixture_rvs([0.4, 0.6], 2000, [mvn3, mvn32], 3)
#rvs2 = rvs[:,:2]
import matplotlib.pyplot as plt
fig = plt.figure()
fig.add_subplot(2, 2, 1)
plt.plot(rvs[:,0], rvs[:,1], '.', alpha=0.25)
plt.title('1 versus 0')
fig.add_subplot(2, 2, 2)
plt.plot(rvs[:,0], rvs[:,2], '.', alpha=0.25)
plt.title('2 versus 0')
fig.add_subplot(2, 2, 3)
plt.plot(rvs[:,1], rvs[:,2], '.', alpha=0.25)
plt.title('2 versus 1')
#plt.show()
| bsd-3-clause |
ndingwall/scikit-learn | sklearn/gaussian_process/tests/test_kernels.py | 9 | 14133 | """Testing for kernels for Gaussian processes."""
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import pytest
import numpy as np
from inspect import signature
from sklearn.gaussian_process.kernels import _approx_fprime
from sklearn.metrics.pairwise \
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
from sklearn.gaussian_process.kernels \
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
Exponentiation, CompoundKernel)
from sklearn.base import clone
from sklearn.utils._testing import (assert_almost_equal, assert_array_equal,
assert_array_almost_equal,
assert_allclose,
assert_raise_message)
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))
kernel_rbf_plus_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
ConstantKernel(constant_value=10.0),
2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * RBF(length_scale=0.5), kernel_rbf_plus_white,
2.0 * RBF(length_scale=[0.5, 2.0]),
2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * Matern(length_scale=0.5, nu=0.5),
2.0 * Matern(length_scale=1.5, nu=1.5),
2.0 * Matern(length_scale=2.5, nu=2.5),
2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
RationalQuadratic(length_scale=0.5, alpha=1.5),
ExpSineSquared(length_scale=0.5, periodicity=1.5),
DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2,
RBF(length_scale=[2.0]), Matern(length_scale=[2.0])]
for metric in PAIRWISE_KERNEL_FUNCTIONS:
if metric in ["additive_chi2", "chi2"]:
continue
kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
@pytest.mark.parametrize('kernel', kernels)
def test_kernel_gradient(kernel):
# Compare analytic and numeric gradient of kernels.
K, K_gradient = kernel(X, eval_gradient=True)
assert K_gradient.shape[0] == X.shape[0]
assert K_gradient.shape[1] == X.shape[0]
assert K_gradient.shape[2] == kernel.theta.shape[0]
def eval_kernel_for_theta(theta):
kernel_clone = kernel.clone_with_theta(theta)
K = kernel_clone(X, eval_gradient=False)
return K
K_gradient_approx = \
_approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)
assert_almost_equal(K_gradient, K_gradient_approx, 4)
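# Minimal finite-difference sketch (illustrative only; the test above relies on sklearn's
# internal _approx_fprime): central difference of K(X) w.r.t. one log-hyperparameter.
def finite_difference_kernel_gradient(kernel, X, i, eps=1e-8):
    theta_plus, theta_minus = kernel.theta.copy(), kernel.theta.copy()
    theta_plus[i] += eps
    theta_minus[i] -= eps
    K_plus = kernel.clone_with_theta(theta_plus)(X)
    K_minus = kernel.clone_with_theta(theta_minus)(X)
    return (K_plus - K_minus) / (2 * eps)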
@pytest.mark.parametrize(
'kernel',
[kernel for kernel in kernels
# skip non-basic kernels
if not (isinstance(kernel, KernelOperator)
or isinstance(kernel, Exponentiation))])
def test_kernel_theta(kernel):
# Check that parameter vector theta of kernel is set correctly.
theta = kernel.theta
_, K_gradient = kernel(X, eval_gradient=True)
# Determine kernel parameters that contribute to theta
init_sign = signature(kernel.__class__.__init__).parameters.values()
args = [p.name for p in init_sign if p.name != 'self']
theta_vars = map(lambda s: s[0:-len("_bounds")],
filter(lambda s: s.endswith("_bounds"), args))
assert (
set(hyperparameter.name
for hyperparameter in kernel.hyperparameters) ==
set(theta_vars))
# Check that values returned in theta are consistent with
# hyperparameter values (being their logarithms)
for i, hyperparameter in enumerate(kernel.hyperparameters):
assert (theta[i] == np.log(getattr(kernel, hyperparameter.name)))
# Fixed kernel parameters must be excluded from theta and gradient.
for i, hyperparameter in enumerate(kernel.hyperparameters):
# create copy with certain hyperparameter fixed
params = kernel.get_params()
params[hyperparameter.name + "_bounds"] = "fixed"
kernel_class = kernel.__class__
new_kernel = kernel_class(**params)
# Check that theta and K_gradient are identical with the fixed
# dimension left out
_, K_gradient_new = new_kernel(X, eval_gradient=True)
assert theta.shape[0] == new_kernel.theta.shape[0] + 1
assert K_gradient.shape[2] == K_gradient_new.shape[2] + 1
if i > 0:
assert theta[:i] == new_kernel.theta[:i]
assert_array_equal(K_gradient[..., :i],
K_gradient_new[..., :i])
if i + 1 < len(kernel.hyperparameters):
assert theta[i + 1:] == new_kernel.theta[i:]
assert_array_equal(K_gradient[..., i + 1:],
K_gradient_new[..., i:])
# Check that values of theta are modified correctly
for i, hyperparameter in enumerate(kernel.hyperparameters):
theta[i] = np.log(42)
kernel.theta = theta
assert_almost_equal(getattr(kernel, hyperparameter.name), 42)
setattr(kernel, hyperparameter.name, 43)
assert_almost_equal(kernel.theta[i], np.log(43))
@pytest.mark.parametrize('kernel',
[kernel for kernel in kernels
# Identity is not satisfied on diagonal
if kernel != kernel_rbf_plus_white])
def test_auto_vs_cross(kernel):
# Auto-correlation and cross-correlation should be consistent.
K_auto = kernel(X)
K_cross = kernel(X, X)
assert_almost_equal(K_auto, K_cross, 5)
@pytest.mark.parametrize('kernel', kernels)
def test_kernel_diag(kernel):
# Test that diag method of kernel returns consistent results.
K_call_diag = np.diag(kernel(X))
K_diag = kernel.diag(X)
assert_almost_equal(K_call_diag, K_diag, 5)
def test_kernel_operator_commutative():
# Adding kernels and multiplying kernels should be commutative.
# Check addition
assert_almost_equal((RBF(2.0) + 1.0)(X),
(1.0 + RBF(2.0))(X))
# Check multiplication
assert_almost_equal((3.0 * RBF(2.0))(X),
(RBF(2.0) * 3.0)(X))
def test_kernel_anisotropic():
# Anisotropic kernel should be consistent with isotropic kernels.
kernel = 3.0 * RBF([0.5, 2.0])
K = kernel(X)
X1 = np.array(X)
X1[:, 0] *= 4
K1 = 3.0 * RBF(2.0)(X1)
assert_almost_equal(K, K1)
X2 = np.array(X)
X2[:, 1] /= 4
K2 = 3.0 * RBF(0.5)(X2)
assert_almost_equal(K, K2)
# Check getting and setting via theta
kernel.theta = kernel.theta + np.log(2)
assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
@pytest.mark.parametrize('kernel',
[kernel for kernel in kernels
if kernel.is_stationary()])
def test_kernel_stationary(kernel):
# Test stationarity of kernels.
K = kernel(X, X + 1)
assert_almost_equal(K[0, 0], np.diag(K))
@pytest.mark.parametrize('kernel', kernels)
def test_kernel_input_type(kernel):
    # Test whether kernel is defined for vectors or structured data
if isinstance(kernel, Exponentiation):
assert(kernel.requires_vector_input ==
kernel.kernel.requires_vector_input)
if isinstance(kernel, KernelOperator):
assert(kernel.requires_vector_input ==
(kernel.k1.requires_vector_input or
kernel.k2.requires_vector_input))
def test_compound_kernel_input_type():
kernel = CompoundKernel([WhiteKernel(noise_level=3.0)])
assert not kernel.requires_vector_input
kernel = CompoundKernel([WhiteKernel(noise_level=3.0),
RBF(length_scale=2.0)])
assert kernel.requires_vector_input
def check_hyperparameters_equal(kernel1, kernel2):
# Check that hyperparameters of two kernels are equal
for attr in set(dir(kernel1) + dir(kernel2)):
if attr.startswith("hyperparameter_"):
attr_value1 = getattr(kernel1, attr)
attr_value2 = getattr(kernel2, attr)
assert attr_value1 == attr_value2
@pytest.mark.parametrize("kernel", kernels)
def test_kernel_clone(kernel):
# Test that sklearn's clone works correctly on kernels.
kernel_cloned = clone(kernel)
# XXX: Should this be fixed?
# This differs from the sklearn's estimators equality check.
assert kernel == kernel_cloned
assert id(kernel) != id(kernel_cloned)
# Check that all constructor parameters are equal.
assert kernel.get_params() == kernel_cloned.get_params()
# Check that all hyperparameters are equal.
check_hyperparameters_equal(kernel, kernel_cloned)
@pytest.mark.parametrize('kernel', kernels)
def test_kernel_clone_after_set_params(kernel):
# This test is to verify that using set_params does not
# break clone on kernels.
# This used to break because in kernels such as the RBF, non-trivial
# logic that modified the length scale used to be in the constructor
# See https://github.com/scikit-learn/scikit-learn/issues/6961
# for more details.
bounds = (1e-5, 1e5)
kernel_cloned = clone(kernel)
params = kernel.get_params()
# RationalQuadratic kernel is isotropic.
isotropic_kernels = (ExpSineSquared, RationalQuadratic)
if 'length_scale' in params and not isinstance(kernel,
isotropic_kernels):
length_scale = params['length_scale']
if np.iterable(length_scale):
# XXX unreached code as of v0.22
params['length_scale'] = length_scale[0]
params['length_scale_bounds'] = bounds
else:
params['length_scale'] = [length_scale] * 2
params['length_scale_bounds'] = bounds * 2
kernel_cloned.set_params(**params)
kernel_cloned_clone = clone(kernel_cloned)
assert (kernel_cloned_clone.get_params() == kernel_cloned.get_params())
assert id(kernel_cloned_clone) != id(kernel_cloned)
check_hyperparameters_equal(kernel_cloned, kernel_cloned_clone)
def test_matern_kernel():
# Test consistency of Matern kernel for special values of nu.
K = Matern(nu=1.5, length_scale=1.0)(X)
# the diagonal elements of a matern kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))
# matern kernel for coef0==0.5 is equal to absolute exponential kernel
K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
K = Matern(nu=0.5, length_scale=1.0)(X)
assert_array_almost_equal(K, K_absexp)
# matern kernel with coef0==inf is equal to RBF kernel
K_rbf = RBF(length_scale=1.0)(X)
K = Matern(nu=np.inf, length_scale=1.0)(X)
assert_array_almost_equal(K, K_rbf)
assert_allclose(K, K_rbf)
# test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5])
# result in nearly identical results as the general case for coef0 in
# [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
tiny = 1e-10
for nu in [0.5, 1.5, 2.5]:
K1 = Matern(nu=nu, length_scale=1.0)(X)
K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
assert_array_almost_equal(K1, K2)
# test that coef0==large is close to RBF
large = 100
K1 = Matern(nu=large, length_scale=1.0)(X)
K2 = RBF(length_scale=1.0)(X)
assert_array_almost_equal(K1, K2, decimal=2)
@pytest.mark.parametrize("kernel", kernels)
def test_kernel_versus_pairwise(kernel):
# Check that GP kernels can also be used as pairwise kernels.
# Test auto-kernel
if kernel != kernel_rbf_plus_white:
# For WhiteKernel: k(X) != k(X,X). This is assumed by
# pairwise_kernels
K1 = kernel(X)
K2 = pairwise_kernels(X, metric=kernel)
assert_array_almost_equal(K1, K2)
# Test cross-kernel
K1 = kernel(X, Y)
K2 = pairwise_kernels(X, Y, metric=kernel)
assert_array_almost_equal(K1, K2)
@pytest.mark.parametrize("kernel", kernels)
def test_set_get_params(kernel):
# Check that set_params()/get_params() is consistent with kernel.theta.
# Test get_params()
index = 0
params = kernel.get_params()
for hyperparameter in kernel.hyperparameters:
if isinstance("string", type(hyperparameter.bounds)):
if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
params[hyperparameter.name])
index += size
else:
assert_almost_equal(np.exp(kernel.theta[index]),
params[hyperparameter.name])
index += 1
# Test set_params()
index = 0
value = 10 # arbitrary value
for hyperparameter in kernel.hyperparameters:
if isinstance("string", type(hyperparameter.bounds)):
if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
kernel.set_params(**{hyperparameter.name: [value] * size})
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
[value] * size)
index += size
else:
kernel.set_params(**{hyperparameter.name: value})
assert_almost_equal(np.exp(kernel.theta[index]), value)
index += 1
@pytest.mark.parametrize("kernel", kernels)
def test_repr_kernels(kernel):
# Smoke-test for repr in kernels.
repr(kernel)
def test_rational_quadratic_kernel():
kernel = RationalQuadratic(length_scale=[1., 1.])
assert_raise_message(AttributeError,
"RationalQuadratic kernel only supports isotropic "
"version, please use a single "
"scalar for length_scale", kernel, X)
| bsd-3-clause |
jrdurrant/insect_analysis | vision/measurements/subspace_shape.py | 1 | 4784 | import numpy as np
from skimage.transform import SimilarityTransform, estimate_transform, matrix_transform
import matplotlib.pyplot as plt
import scipy
from skimage.filters import gaussian
def plot_closest_points(image_points, edge_points, closest_edge_points):
plt.plot(edge_points[:, 0], edge_points[:, 1], 'r+')
plt.plot(image_points[:, 0], image_points[:, 1], 'b')
for im, ed in zip(image_points, closest_edge_points):
plt.plot([im[0], ed[0]], [im[1], ed[1]], 'g')
plt.show()
def learn(points, K=1):
points = [point_set.flatten() for point_set in points]
w = np.stack(points, axis=1)
mu = np.mean(w, axis=1).reshape(-1, 1)
mu = (mu.reshape(-1, 2) - mu.reshape(-1, 2).mean(axis=0)).reshape(-1, 1)
W = w - mu
U, L2, _ = np.linalg.svd(np.dot(W, W.T))
D = mu.shape[0]
sigma2 = np.sum(L2[(K + 1):(D + 1)]) / (D - K)
phi = U[:, :K] @ np.sqrt(np.diag(L2[:K]) - sigma2 * np.eye(K))
return mu, phi, sigma2
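# Toy usage sketch (the square outline and noise level are invented for illustration):
# learn a one-dimensional shape subspace from three noisy copies of the same contour.
def _example_learn_square():
    rng = np.random.RandomState(0)
    square = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
    point_sets = [square + 0.05 * rng.randn(*square.shape) for _ in range(3)]
    mu, phi, sigma2 = learn(point_sets, K=1)
    return mu.shape, phi.shape, sigma2  # ((8, 1), (8, 1), small non-negative scalar)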
def update_h(sigma2, phi, y, mu, psi):
"""Updates the hidden variables using updated parameters.
This is an implementation of the equation:
.. math::
\\hat{h} = (\\sigma^2 I + \\sum_{n=1}^N \\Phi_n^T A^T A \\Phi_n)^{-1} \\sum_{n=1}^N \\Phi_n^T A^T (y_n - A \\mu_n - b)
"""
N = y.shape[0]
K = phi.shape[1]
A = psi.params[:2, :2]
b = psi.translation
partial_0 = 0
for phi_n in np.split(phi, N, axis=0):
partial_0 += phi_n.T @ A.T @ A @ phi_n
partial_1 = sigma2 * np.eye(K) + partial_0
partial_2 = np.zeros((K, 1))
for phi_n, y_n, mu_n in zip(np.split(phi, N, axis=0), y, mu.reshape(-1, 2)):
partial_2 += phi_n.T @ A.T @ (y_n - A @ mu_n - b).reshape(2, -1)
return np.linalg.inv(partial_1) @ partial_2
def similarity(edge_image, mu, phi, sigma2, h, psi):
height, width = edge_image.shape
edge_distance = scipy.ndimage.distance_transform_edt(~edge_image)
w = (mu + phi @ h).reshape(-1, 2)
image_points = matrix_transform(w, psi.params)
closest_distances = scipy.interpolate.interp2d(range(width), range(height), edge_distance)
K = h.size
noise = scipy.stats.multivariate_normal(mean=np.zeros(K), cov=np.eye(K))
    # Log-prior of the shape parameters h under the standard normal prior. Note that this
    # value is computed (and h is printed when the density underflows to zero) but it is
    # not currently added to the similarity score returned below.
    if noise.pdf(h.flatten()) == 0:
        print(h.flatten())
    noise = np.log(noise.pdf(h.flatten()))
return -closest_distances(image_points[:, 0], image_points[:, 1]).sum() / sigma2
def gradient_step(gradient_y, gradient_x, magnitude, locations, step_size=5):
height, width = magnitude.shape
y = np.clip(locations[:, 1], 0, height - 1).astype(int)
x = np.clip(locations[:, 0], 0, width - 1).astype(int)
y_new = np.clip(locations[:, 1] - step_size * magnitude[y, x] * gradient_y[y, x], 0, height - 1)
x_new = np.clip(locations[:, 0] - step_size * magnitude[y, x] * gradient_x[y, x], 0, width - 1)
return np.stack((x_new, y_new), axis=1)
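# Helper sketch duplicating the preprocessing done inside infer() below, for standalone
# experimentation: distance transform of a boolean edge map plus the smoothed gradient
# fields and their magnitude, in the form expected by gradient_step().
def edge_gradient_fields(edge_image, blur_sigma=2):
    edge_near = scipy.ndimage.distance_transform_edt(~edge_image)
    Gy, Gx = np.gradient(gaussian(edge_near, blur_sigma))
    magnitude = np.sqrt(Gy ** 2 + Gx ** 2)
    return Gy, Gx, magnitude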
def infer(edge_image, edge_lengths, mu, phi, sigma2,
update_slice=slice(None),
scale_estimate=None,
rotation=0,
translation=(0, 0)):
edge_near = scipy.ndimage.distance_transform_edt(~edge_image)
edge_near_blur = gaussian(edge_near, 2)
Gy, Gx = np.gradient(edge_near_blur)
mag = np.sqrt(np.power(Gy, 2) + np.power(Gx, 2))
if scale_estimate is None:
scale_estimate = min(edge_image.shape) * 4
mu = (mu.reshape(-1, 2) - mu.reshape(-1, 2).mean(axis=0)).reshape(-1, 1)
average_distance = np.sqrt(np.power(mu.reshape(-1, 2), 2).sum(axis=1)).mean()
scale_estimate /= average_distance * np.sqrt(2)
h = np.zeros((phi.shape[1], 1))
psi = SimilarityTransform(scale=scale_estimate, rotation=rotation, translation=translation)
while True:
w = (mu + phi @ h).reshape(-1, 2)
image_points = matrix_transform(w, psi.params)[update_slice, :]
image_points = np.concatenate((image_points, np.zeros((image_points.shape[0], 1))), axis=1)
closest_edge_points = gradient_step(Gy, Gx, mag, image_points)
w = mu.reshape(-1, 2)
psi = estimate_transform('similarity', w[update_slice, :], closest_edge_points)
image_points = matrix_transform(w, psi.params)[update_slice, :]
image_points = np.concatenate((image_points, np.zeros((image_points.shape[0], 1))), axis=1)
closest_edge_points = gradient_step(Gy, Gx, mag, image_points)
mu_slice = mu.reshape(-1, 2)[update_slice, :].reshape(-1, 1)
K = phi.shape[-1]
phi_full = phi.reshape(-1, 2, K)
phi_slice = phi_full[update_slice, :].reshape(-1, K)
h = update_h(sigma2, phi_slice, closest_edge_points, mu_slice, psi)
w = (mu + phi @ h).reshape(-1, 2)
image_points = matrix_transform(w, psi.params)
update_slice = yield image_points, closest_edge_points, h, psi
| gpl-2.0 |
h2educ/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 77 | 1820 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
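# Refactoring sketch (illustrative only): the averaged test error computed inline in the
# loop above, for a single classifier and a single held-out proportion.
def mean_test_error(clf, X, y, test_size, rounds, rng):
    errors = []
    for _ in range(rounds):
        X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=test_size,
                                                  random_state=rng)
        clf.fit(X_tr, y_tr)
        errors.append(1 - np.mean(clf.predict(X_te) == y_te))
    return np.mean(errors)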
| bsd-3-clause |
PalNilsson/pilot2 | pilot/user/atlas/setup.py | 1 | 17850 | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Paul Nilsson, paul.nilsson@cern.ch, 2017-2020
import os
import re
import glob
from time import sleep
from pilot.common.errorcodes import ErrorCodes
from pilot.common.exception import NoSoftwareDir
from pilot.info import infosys
from pilot.util.container import execute
from pilot.util.filehandling import read_file, write_file, copy
from .metadata import get_file_info_from_xml
import logging
logger = logging.getLogger(__name__)
errors = ErrorCodes()
def get_file_system_root_path():
"""
Return the root path of the local file system.
The function returns "/cvmfs" or "/(some path)/cvmfs" in case the expected file system root path is not
where it usually is (e.g. on an HPC). A site can set the base path by exporting ATLAS_SW_BASE.
:return: path (string)
"""
return os.environ.get('ATLAS_SW_BASE', '/cvmfs')
def should_pilot_prepare_setup(noexecstrcnv, jobpars, imagename=None):
"""
Determine whether the pilot should add the setup to the payload command or not.
    The pilot will not add asetup if jobPars already contains the information (i.e. it was set by the payload creator).
If noExecStrCnv is set, then jobPars is expected to contain asetup.sh + options
If a stand-alone container / user defined container is used, pilot should not prepare asetup.
:param noexecstrcnv: boolean.
:param jobpars: job parameters (string).
:param imagename: container image (string).
:return: boolean.
"""
if imagename:
return False
if noexecstrcnv:
if "asetup.sh" in jobpars:
logger.info("asetup will be taken from jobPars")
preparesetup = False
else:
logger.info("noExecStrCnv is set but asetup command was not found in jobPars (pilot will prepare asetup)")
preparesetup = True
else:
logger.info("pilot will prepare the setup")
preparesetup = True
return preparesetup
def get_alrb_export(add_if=False):
"""
Return the export command for the ALRB path if it exists.
If the path does not exist, return empty string.
:param add_if: Boolean. True means that an if statement will be placed around the export.
:return: export command
"""
path = "%s/atlas.cern.ch/repo" % get_file_system_root_path()
cmd = "export ATLAS_LOCAL_ROOT_BASE=%s/ATLASLocalRootBase;" % path if os.path.exists(path) else ""
# if [ -z "$ATLAS_LOCAL_ROOT_BASE" ]; then export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase; fi;
if cmd and add_if:
cmd = 'if [ -z \"$ATLAS_LOCAL_ROOT_BASE\" ]; then ' + cmd + ' fi;'
return cmd
def get_asetup(asetup=True, alrb=False, add_if=False):
"""
Define the setup for asetup, i.e. including full path to asetup and setting of ATLAS_LOCAL_ROOT_BASE
Only include the actual asetup script if asetup=True. This is not needed if the jobPars contain the payload command
but the pilot still needs to add the exports and the atlasLocalSetup.
:param asetup: Boolean. True value means that the pilot should include the asetup command.
:param alrb: Boolean. True value means that the function should return special setup used with ALRB and containers.
:param add_if: Boolean. True means that an if statement will be placed around the export.
:raises: NoSoftwareDir if appdir does not exist.
:return: source <path>/asetup.sh (string).
"""
cmd = ""
alrb_cmd = get_alrb_export(add_if=add_if)
if alrb_cmd != "":
cmd = alrb_cmd
if not alrb:
cmd += "source ${ATLAS_LOCAL_ROOT_BASE}/user/atlasLocalSetup.sh --quiet;"
if asetup:
cmd += "source $AtlasSetup/scripts/asetup.sh"
else:
try: # use try in case infosys has not been initiated
appdir = infosys.queuedata.appdir
except Exception:
appdir = ""
if appdir == "":
appdir = os.environ.get('VO_ATLAS_SW_DIR', '')
if appdir != "":
# make sure that the appdir exists
if not os.path.exists(appdir):
msg = 'appdir does not exist: %s' % appdir
logger.warning(msg)
raise NoSoftwareDir(msg)
if asetup:
cmd = "source %s/scripts/asetup.sh" % appdir
# do not return an empty string
#if not cmd:
# cmd = "what?"
return cmd
def get_asetup_options(release, homepackage):
"""
Determine the proper asetup options.
:param release: ATLAS release string.
:param homepackage: ATLAS homePackage string.
:return: asetup options (string).
"""
asetupopt = []
release = re.sub('^Atlas-', '', release)
# is it a user analysis homePackage?
if 'AnalysisTransforms' in homepackage:
_homepackage = re.sub('^AnalysisTransforms-*', '', homepackage)
if _homepackage == '' or re.search(r'^\d+\.\d+\.\d+$', release) is None: # Python 3 (added r)
if release != "":
asetupopt.append(release)
if _homepackage != '':
asetupopt += _homepackage.split('_')
else:
asetupopt += homepackage.split('/')
if release not in homepackage and release not in asetupopt:
asetupopt.append(release)
# Add the notest,here for all setups (not necessary for late releases but harmless to add)
asetupopt.append('notest')
# asetupopt.append('here')
# Add the fast option if possible (for the moment, check for locally defined env variable)
if "ATLAS_FAST_ASETUP" in os.environ:
asetupopt.append('fast')
return ','.join(asetupopt)
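# Illustrative check (the release/homePackage pair below is an assumption, not taken from
# a real PanDA job): get_asetup_options() turns the pair into a comma-separated string.
def _example_asetup_options():
    opts = get_asetup_options('Atlas-21.0.16', 'AtlasOffline/21.0.16')
    # expected to be 'AtlasOffline,21.0.16,notest' for this hypothetical input
    # (when ATLAS_FAST_ASETUP is not set in the environment)
    return opts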
def is_standard_atlas_job(release):
"""
Is it a standard ATLAS job?
A job is a standard ATLAS job if the release string begins with 'Atlas-'.
:param release: Release value (string).
:return: Boolean. Returns True if standard ATLAS job.
"""
return release.startswith('Atlas-')
def set_inds(dataset):
"""
Set the INDS environmental variable used by runAthena.
:param dataset: dataset for input files (realDatasetsIn) (string).
:return:
"""
inds = ""
_dataset = dataset.split(',')
for ds in _dataset:
if "DBRelease" not in ds and ".lib." not in ds:
inds = ds
break
if inds != "":
logger.info("setting INDS environmental variable to: %s" % (inds))
os.environ['INDS'] = inds
else:
logger.warning("INDS unknown")
def get_analysis_trf(transform, workdir):
"""
Prepare to download the user analysis transform with curl.
The function will verify the download location from a known list of hosts.
:param transform: full trf path (url) (string).
:param workdir: work directory (string).
:return: exit code (int), diagnostics (string), transform_name (string)
"""
ec = 0
diagnostics = ""
# test if $HARVESTER_WORKDIR is set
harvester_workdir = os.environ.get('HARVESTER_WORKDIR')
if harvester_workdir is not None:
search_pattern = "%s/jobO.*.tar.gz" % harvester_workdir
logger.debug("search_pattern - %s" % search_pattern)
jobopt_files = glob.glob(search_pattern)
for jobopt_file in jobopt_files:
logger.debug("jobopt_file = %s workdir = %s" % (jobopt_file, workdir))
try:
copy(jobopt_file, workdir)
except Exception as e:
logger.error("could not copy file %s to %s : %s" % (jobopt_file, workdir, e))
if '/' in transform:
transform_name = transform.split('/')[-1]
else:
logger.warning('did not detect any / in %s (using full transform name)' % transform)
transform_name = transform
# is the command already available? (e.g. if already downloaded by a preprocess/main process step)
if os.path.exists(os.path.join(workdir, transform_name)):
logger.info('script %s is already available - no need to download again' % transform_name)
return ec, diagnostics, transform_name
original_base_url = ""
# verify the base URL
for base_url in get_valid_base_urls():
if transform.startswith(base_url):
original_base_url = base_url
break
if original_base_url == "":
diagnostics = "invalid base URL: %s" % transform
return errors.TRFDOWNLOADFAILURE, diagnostics, ""
# try to download from the required location, if not - switch to backup
status = False
for base_url in get_valid_base_urls(order=original_base_url):
trf = re.sub(original_base_url, base_url, transform)
logger.debug("attempting to download script: %s" % trf)
status, diagnostics = download_transform(trf, transform_name, workdir)
if status:
break
if not status:
return errors.TRFDOWNLOADFAILURE, diagnostics, ""
logger.info("successfully downloaded script")
path = os.path.join(workdir, transform_name)
logger.debug("changing permission of %s to 0o755" % path)
try:
os.chmod(path, 0o755) # Python 2/3
except Exception as e:
diagnostics = "failed to chmod %s: %s" % (transform_name, e)
return errors.CHMODTRF, diagnostics, ""
return ec, diagnostics, transform_name
def download_transform(url, transform_name, workdir):
"""
Download the transform from the given url
:param url: download URL with path to transform (string).
:param transform_name: trf name (string).
:param workdir: work directory (string).
:return:
"""
status = False
diagnostics = ""
path = os.path.join(workdir, transform_name)
cmd = 'curl -sS \"%s\" > %s' % (url, path)
trial = 1
max_trials = 3
# test if $HARVESTER_WORKDIR is set
harvester_workdir = os.environ.get('HARVESTER_WORKDIR')
if harvester_workdir is not None:
# skip curl by setting max_trials = 0
max_trials = 0
source_path = os.path.join(harvester_workdir, transform_name)
try:
copy(source_path, path)
status = True
except Exception as error:
status = False
diagnostics = "Failed to copy file %s to %s : %s" % (source_path, path, error)
logger.error(diagnostics)
# try to download the trf a maximum of 3 times
while trial <= max_trials:
logger.info("executing command [trial %d/%d]: %s" % (trial, max_trials, cmd))
exit_code, stdout, stderr = execute(cmd, mute=True)
if not stdout:
stdout = "(None)"
if exit_code != 0:
# Analyze exit code / output
diagnostics = "curl command failed: %d, %s, %s" % (exit_code, stdout, stderr)
logger.warning(diagnostics)
if trial == max_trials:
logger.fatal('could not download transform: %s' % stdout)
status = False
break
else:
logger.info("will try again after 60 s")
sleep(60)
else:
logger.info("curl command returned: %s" % stdout)
status = True
break
trial += 1
return status, diagnostics
def get_valid_base_urls(order=None):
"""
Return a list of valid base URLs from where the user analysis transform may be downloaded from.
If order is defined, return given item first.
E.g. order=http://atlpan.web.cern.ch/atlpan -> ['http://atlpan.web.cern.ch/atlpan', ...]
NOTE: the URL list may be out of date.
:param order: order (string).
:return: valid base URLs (list).
"""
valid_base_urls = []
_valid_base_urls = ["http://www.usatlas.bnl.gov",
"https://www.usatlas.bnl.gov",
"http://pandaserver.cern.ch",
"http://atlpan.web.cern.ch/atlpan",
"https://atlpan.web.cern.ch/atlpan",
"http://classis01.roma1.infn.it",
"http://atlas-install.roma1.infn.it"]
if order:
valid_base_urls.append(order)
for url in _valid_base_urls:
if url != order:
valid_base_urls.append(url)
else:
valid_base_urls = _valid_base_urls
return valid_base_urls
def get_payload_environment_variables(cmd, job_id, task_id, attempt_nr, processing_type, site_name, analysis_job):
"""
    Return an array with environment variables needed by the payload.
:param cmd: payload execution command (string).
:param job_id: PanDA job id (string).
:param task_id: PanDA task id (string).
:param attempt_nr: PanDA job attempt number (int).
:param processing_type: processing type (string).
:param site_name: site name (string).
:param analysis_job: True for user analysis jobs, False otherwise (boolean).
:return: list of environment variables needed by the payload.
"""
variables = []
variables.append('export PANDA_RESOURCE=\'%s\';' % site_name)
variables.append('export FRONTIER_ID=\"[%s_%s]\";' % (task_id, job_id))
variables.append('export CMSSW_VERSION=$FRONTIER_ID;')
variables.append('export PandaID=%s;' % os.environ.get('PANDAID', 'unknown'))
variables.append('export PanDA_TaskID=\'%s\';' % os.environ.get('PanDA_TaskID', 'unknown'))
variables.append('export PanDA_AttemptNr=\'%d\';' % attempt_nr)
variables.append('export INDS=\'%s\';' % os.environ.get('INDS', 'unknown'))
# Unset ATHENA_PROC_NUMBER if set for event service Merge jobs
if "Merge_tf" in cmd and 'ATHENA_PROC_NUMBER' in os.environ:
variables.append('unset ATHENA_PROC_NUMBER;')
variables.append('unset ATHENA_CORE_NUMBER;')
if analysis_job:
variables.append('export ROOT_TTREECACHE_SIZE=1;')
try:
core_count = int(os.environ.get('ATHENA_PROC_NUMBER'))
except Exception:
_core_count = 'export ROOTCORE_NCPUS=1;'
else:
_core_count = 'export ROOTCORE_NCPUS=%d;' % core_count
variables.append(_core_count)
if processing_type == "":
logger.warning("RUCIO_APPID needs job.processingType but it is not set!")
else:
variables.append('export RUCIO_APPID=\'%s\';' % processing_type)
variables.append('export RUCIO_ACCOUNT=\'%s\';' % os.environ.get('RUCIO_ACCOUNT', 'pilot'))
return variables
def get_writetoinput_filenames(writetofile):
"""
Extract the writeToFile file name(s).
writeToFile='tmpin_mc16_13TeV.345935.PhPy8EG_A14_ttbarMET100_200_hdamp258p75_nonallhad.merge.AOD.e6620_e5984_s3126_r10724_r10726_tid15760866_00:AOD.15760866._000002.pool.root.1'
-> return 'tmpin_mc16_13TeV.345935.PhPy8EG_A14_ttbarMET100_200_hdamp258p75_nonallhad.merge.AOD.e6620_e5984_s3126_r10724_r10726_tid15760866_00'
:param writetofile: string containing file name information.
:return: list of file names
"""
filenames = []
entries = writetofile.split('^')
for entry in entries:
if ':' in entry:
name = entry.split(":")[0]
name = name.replace('.pool.root.', '.txt.') # not necessary?
filenames.append(name)
return filenames
def replace_lfns_with_turls(cmd, workdir, filename, infiles, writetofile=""):
"""
Replace all LFNs with full TURLs in the payload execution command.
This function is used with direct access in production jobs. Athena requires a full TURL instead of LFN.
:param cmd: payload execution command (string).
:param workdir: location of metadata file (string).
:param filename: metadata file name (string).
:param infiles: list of input files.
:param writetofile:
:return: updated cmd (string).
"""
turl_dictionary = {} # { LFN: TURL, ..}
path = os.path.join(workdir, filename)
if os.path.exists(path):
file_info_dictionary = get_file_info_from_xml(workdir, filename=filename)
for inputfile in infiles:
if inputfile in cmd:
turl = file_info_dictionary[inputfile][0]
turl_dictionary[inputfile] = turl
# if turl.startswith('root://') and turl not in cmd:
if turl not in cmd:
cmd = cmd.replace(inputfile, turl)
logger.info("replaced '%s' with '%s' in the run command" % (inputfile, turl))
# replace the LFNs with TURLs in the writetofile input file list (if it exists)
if writetofile and turl_dictionary:
filenames = get_writetoinput_filenames(writetofile)
logger.info("filenames=%s" % filenames)
for fname in filenames:
new_lines = []
path = os.path.join(workdir, fname)
if os.path.exists(path):
f = read_file(path)
for line in f.split('\n'):
fname = os.path.basename(line)
if fname in turl_dictionary:
turl = turl_dictionary[fname]
new_lines.append(turl)
else:
if line:
new_lines.append(line)
lines = '\n'.join(new_lines)
if lines:
write_file(path, lines)
logger.info("lines=%s" % lines)
else:
logger.warning("file does not exist: %s" % path)
else:
logger.warning("could not find file: %s (cannot locate TURLs for direct access)" % filename)
return cmd
| apache-2.0 |
frank-tancf/scikit-learn | examples/plot_digits_pipe.py | 70 | 1813 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
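# Follow-up sketch using standard GridSearchCV attributes: report the PCA dimensionality
# and the logistic-regression C selected by the search above.
def report_best_params(search):
    best = search.best_estimator_
    return (best.named_steps['pca'].n_components,
            best.named_steps['logistic'].C)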
| bsd-3-clause |
hrjn/scikit-learn | sklearn/utils/graph.py | 24 | 6326 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <hagberg@lanl.gov>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph : sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> list(sorted(single_source_shortest_path_length(graph, 0).items()))
[(0, 0), (1, 1), (2, 2), (3, 3)]
>>> graph = np.ones((6, 6))
>>> list(sorted(single_source_shortest_path_length(graph, 2).items()))
[(0, 1), (1, 1), (2, 0), (3, 1), (4, 1), (5, 1)]
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
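# Worked example sketch (not part of the public API): the combinatorial Laplacian of a
# 3-node path graph, following the dense code path above. Row sums are zero and the
# diagonal holds the node degrees.
def _example_path_graph_laplacian():
    graph = np.array([[0., 1., 0.],
                      [1., 0., 1.],
                      [0., 1., 0.]])
    return graph_laplacian(graph)  # [[1., -1., 0.], [-1., 2., -1.], [0., -1., 1.]]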
| bsd-3-clause |
jasdumas/jasdumas.github.io | post_data/kmeans-cluster-poker-hands.py | 1 | 7337 | # load libraries
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
from sklearn.cluster import KMeans
import urllib.request
from pylab import rcParams
rcParams['figure.figsize'] = 9, 8
'''
GET DATA
'''
# read training and test data from the url link and save the file to your working directory
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/poker/poker-hand-training-true.data"
urllib.request.urlretrieve(url, "poker_train.csv")
url2 = "http://archive.ics.uci.edu/ml/machine-learning-databases/poker/poker-hand-testing.data"
urllib.request.urlretrieve(url2, "poker_test.csv")
# read the data in and add column names
data_train = pd.read_csv("poker_train.csv", header=None,
names=['S1', 'C1', 'S2', 'C2', 'S3', 'C3','S4', 'C4', 'S5', 'C5', 'CLASS'])
data_test = pd.read_csv("poker_test.csv", header=None,
names=['S1', 'C1', 'S2', 'C2', 'S3', 'C3','S4', 'C4', 'S5', 'C5', 'CLASS'])
'''
EXPLORE THE DATA
'''
# summary statistics including counts, mean, stdev, quartiles for the training dataset
data_train.head(n=5)
data_train.dtypes # data types of each variable
data_train.describe()
'''
SUBSET THE DATA
'''
# subset clustering variables
cluster=data_train[['S1', 'C1', 'S2', 'C2', 'S3', 'C3','S4', 'C4', 'S5', 'C5']]
'''
STANDARIZE THE DATA
'''
# standardize clustering variables to have mean=0 and sd=1 so that card suit and
# rank are on the same scale and the variables contribute equally to the analysis
clustervar=cluster.copy() # create a copy
clustervar['S1']=preprocessing.scale(clustervar['S1'].astype('float64'))
clustervar['C1']=preprocessing.scale(clustervar['C1'].astype('float64'))
clustervar['S2']=preprocessing.scale(clustervar['S2'].astype('float64'))
clustervar['C2']=preprocessing.scale(clustervar['C2'].astype('float64'))
clustervar['S3']=preprocessing.scale(clustervar['S3'].astype('float64'))
clustervar['C3']=preprocessing.scale(clustervar['C3'].astype('float64'))
clustervar['S4']=preprocessing.scale(clustervar['S4'].astype('float64'))
clustervar['C4']=preprocessing.scale(clustervar['C4'].astype('float64'))
clustervar['S5']=preprocessing.scale(clustervar['S5'].astype('float64'))
clustervar['C5']=preprocessing.scale(clustervar['C5'].astype('float64'))
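# For reference, a compact equivalent of the ten calls above (a sketch; re-running it
# is harmless because standardizing already-standardized columns leaves them unchanged):
for col in clustervar.columns:
    clustervar[col] = preprocessing.scale(clustervar[col].astype('float64'))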
# The data has already been split into train and test sets
clus_train = clustervar
'''
K-MEANS ANALYSIS - INITIAL CLUSTER SET
'''
# k-means cluster analysis for 1-10 clusters due to the 10 possible class outcomes for poker hands
from scipy.spatial.distance import cdist
clusters=range(1,11)
meandist=[]
# loop through each candidate number of clusters and fit the model to the train set;
# generate the predicted cluster assignment and append the mean distance by taking the sum divided by the number of samples
for k in clusters:
model=KMeans(n_clusters=k)
model.fit(clus_train)
clusassign=model.predict(clus_train)
meandist.append(sum(np.min(cdist(clus_train, model.cluster_centers_, 'euclidean'), axis=1))
/ clus_train.shape[0])
"""
Plot the average distance from observations to their cluster centroid
and use the Elbow Method to identify the number of clusters to choose
"""
plt.plot(clusters, meandist)
plt.xlabel('Number of clusters')
plt.ylabel('Average distance')
plt.title('Selecting k with the Elbow Method') # pick the smallest number of clusters that still substantially reduces the average distance
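# A small numeric companion to the elbow plot (a sketch, not in the original script):
# the drop in average distance gained by each additional cluster; the elbow is roughly
# where these drops level off.
drops = [meandist[i - 1] - meandist[i] for i in range(1, len(meandist))]
print(list(zip(list(clusters)[1:], drops)))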
# Interpret 2 cluster solution
model3=KMeans(n_clusters=2)
model3.fit(clus_train) # has cluster assignments based on using 2 clusters
clusassign=model3.predict(clus_train)
# plot clusters
''' Canonical Discriminant Analysis for variable reduction:
1. creates a smaller number of variables
2. linear combination of clustering variables
3. Canonical variables are ordered by proportion of variance accounted for
4. most of the variance will be accounted for in the first few canonical variables
'''
from sklearn.decomposition import PCA # canonical variables computed via the PCA function
pca_2 = PCA(2) # return 2 first canonical variables
plot_columns = pca_2.fit_transform(clus_train) # fit CA to the train dataset
plt.scatter(x=plot_columns[:,0], y=plot_columns[:,1], c=model3.labels_,) # plot 1st canonical variable on x axis, 2nd on y-axis
plt.xlabel('Canonical variable 1')
plt.ylabel('Canonical variable 2')
plt.title('Scatterplot of Canonical Variables for 2 Clusters')
plt.show() # close or overlapping clusters indicate correlated variables with low in-class variance but poor separation; a 2-cluster solution might be better
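# Quick check (a sketch): how much of the variance in the standardized clustering
# variables the two canonical/principal components actually retain.
print(pca_2.explained_variance_ratio_)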
"""
BEGIN multiple steps to merge cluster assignment with clustering variables to examine
cluster variable means by cluster
"""
# create a unique identifier variable from the index for the
# cluster training data to merge with the cluster assignment variable
clus_train.reset_index(level=0, inplace=True)
# create a list that has the new index variable
cluslist=list(clus_train['index'])
# create a list of cluster assignments
labels=list(model3.labels_)
# combine index variable list with cluster assignment list into a dictionary
newlist=dict(zip(cluslist, labels))
newlist
# convert newlist dictionary to a dataframe
newclus=DataFrame.from_dict(newlist, orient='index')
newclus
# rename the cluster assignment column
newclus.columns = ['cluster']
# now do the same for the cluster assignment variable
# create a unique identifier variable from the index for the
# cluster assignment dataframe
# to merge with cluster training data
newclus.reset_index(level=0, inplace=True)
# merge the cluster assignment dataframe with the cluster training variable dataframe
# by the index variable
merged_train=pd.merge(clus_train, newclus, on='index')
merged_train.head(n=100)
# cluster frequencies
merged_train.cluster.value_counts()
"""
END multiple steps to merge cluster assignment with clustering variables to examine
cluster variable means by cluster
"""
# FINALLY calculate clustering variable means by cluster
clustergrp = merged_train.groupby('cluster').mean()
print ("Clustering variable means by cluster")
print(clustergrp)
'''
validate clusters in training data by examining cluster differences in CLASS using ANOVA
first we have to merge CLASS of the poker hand with the clustering variables and cluster assignment data
'''
# split into test / train for class
pokerhand_train=data_train['CLASS']
pokerhand_test=data_test['CLASS']
# put into a pandas dataFrame
pokerhand_train=pd.DataFrame(pokerhand_train)
pokerhand_test=pd.DataFrame(pokerhand_test)
pokerhand_train.reset_index(level=0, inplace=True) # reset index
merged_train_all=pd.merge(pokerhand_train, merged_train, on='index') # merge the pokerhand train with merged clusters
sub1 = merged_train_all[['CLASS', 'cluster']].dropna()
import statsmodels.formula.api as smf
import statsmodels.stats.multicomp as multi
# response formula
pokermod = smf.ols(formula='CLASS ~ cluster', data=sub1).fit()
print (pokermod.summary())
print ('means for Poker hands by cluster')
m1= sub1.groupby('cluster').mean()
print (m1)
print ('standard deviations for Poker hands by cluster')
m2= sub1.groupby('cluster').std()
print (m2)
mc1 = multi.MultiComparison(sub1['CLASS'], sub1['cluster'])
res1 = mc1.tukeyhsd()
print(res1.summary())
| mit |
winklerand/pandas | pandas/tests/io/msgpack/test_newspec.py | 22 | 2650 | # coding: utf-8
from pandas.io.msgpack import packb, unpackb, ExtType
def test_str8():
header = b'\xd9'
data = b'x' * 32
b = packb(data.decode(), use_bin_type=True)
assert len(b) == len(data) + 2
assert b[0:2] == header + b'\x20'
assert b[2:] == data
assert unpackb(b) == data
data = b'x' * 255
b = packb(data.decode(), use_bin_type=True)
assert len(b) == len(data) + 2
assert b[0:2] == header + b'\xff'
assert b[2:] == data
assert unpackb(b) == data
def test_bin8():
header = b'\xc4'
data = b''
b = packb(data, use_bin_type=True)
assert len(b) == len(data) + 2
assert b[0:2] == header + b'\x00'
assert b[2:] == data
assert unpackb(b) == data
data = b'x' * 255
b = packb(data, use_bin_type=True)
assert len(b) == len(data) + 2
assert b[0:2] == header + b'\xff'
assert b[2:] == data
assert unpackb(b) == data
def test_bin16():
header = b'\xc5'
data = b'x' * 256
b = packb(data, use_bin_type=True)
assert len(b) == len(data) + 3
assert b[0:1] == header
assert b[1:3] == b'\x01\x00'
assert b[3:] == data
assert unpackb(b) == data
data = b'x' * 65535
b = packb(data, use_bin_type=True)
assert len(b) == len(data) + 3
assert b[0:1] == header
assert b[1:3] == b'\xff\xff'
assert b[3:] == data
assert unpackb(b) == data
def test_bin32():
header = b'\xc6'
data = b'x' * 65536
b = packb(data, use_bin_type=True)
assert len(b) == len(data) + 5
assert b[0:1] == header
assert b[1:5] == b'\x00\x01\x00\x00'
assert b[5:] == data
assert unpackb(b) == data
def test_ext():
def check(ext, packed):
assert packb(ext) == packed
assert unpackb(packed) == ext
check(ExtType(0x42, b'Z'), b'\xd4\x42Z') # fixext 1
check(ExtType(0x42, b'ZZ'), b'\xd5\x42ZZ') # fixext 2
check(ExtType(0x42, b'Z' * 4), b'\xd6\x42' + b'Z' * 4) # fixext 4
check(ExtType(0x42, b'Z' * 8), b'\xd7\x42' + b'Z' * 8) # fixext 8
check(ExtType(0x42, b'Z' * 16), b'\xd8\x42' + b'Z' * 16) # fixext 16
# ext 8
check(ExtType(0x42, b''), b'\xc7\x00\x42')
check(ExtType(0x42, b'Z' * 255), b'\xc7\xff\x42' + b'Z' * 255)
# ext 16
check(ExtType(0x42, b'Z' * 256), b'\xc8\x01\x00\x42' + b'Z' * 256)
check(ExtType(0x42, b'Z' * 0xffff), b'\xc8\xff\xff\x42' + b'Z' * 0xffff)
# ext 32
check(
ExtType(0x42, b'Z' *
0x10000), b'\xc9\x00\x01\x00\x00\x42' + b'Z' * 0x10000)
# needs large memory
# check(ExtType(0x42, b'Z'*0xffffffff),
# b'\xc9\xff\xff\xff\xff\x42' + b'Z'*0xffffffff)
| bsd-3-clause |
dingocuster/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms is compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
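# Quick sanity check (a sketch, not part of the original example): the quantile-based
# construction described in the docstring should give roughly balanced classes.
import numpy as np
print(np.bincount(y))  # roughly 13000 / 3 samples per class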
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
bzero/statsmodels | examples/python/regression_plots.py | 33 | 9585 |
## Regression Plots
from __future__ import print_function
from statsmodels.compat import lzip
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.formula.api import ols
### Duncan's Prestige Dataset
#### Load the Data
# We can use a utility function to load any R dataset available from the great <a href="http://vincentarelbundock.github.com/Rdatasets/">Rdatasets package</a>.
prestige = sm.datasets.get_rdataset("Duncan", "car", cache=True).data
prestige.head()
prestige_model = ols("prestige ~ income + education", data=prestige).fit()
print(prestige_model.summary())
#### Influence plots
# Influence plots show the (externally) studentized residuals vs. the leverage of each observation as measured by the hat matrix.
#
# Externally studentized residuals are residuals that are scaled by their standard deviation where
#
# $$var(\hat{\epsilon}_i)=\hat{\sigma}^2_i(1-h_{ii})$$
#
# with
#
# $$\hat{\sigma}^2_i=\frac{1}{n - p - 1}\sum_{j \neq i}^{n}\hat{\epsilon}_j^2$$
#
# $n$ is the number of observations and $p$ is the number of regressors. $h_{ii}$ is the $i$-th diagonal element of the hat matrix
#
# $$H=X(X^{\;\prime}X)^{-1}X^{\;\prime}$$
#
# The influence of each point can be visualized by the criterion keyword argument. Options are Cook's distance and DFFITS, two measures of influence.
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.influence_plot(prestige_model, ax=ax, criterion="cooks")
# As you can see there are a few worrisome observations. Both contractor and reporter have low leverage but a large residual. <br />
# RR.engineer has small residual and large leverage. Conductor and minister have both high leverage and large residuals, and, <br />
# therefore, large influence.
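# Cross-check (a sketch, not in the original notebook): the quantities the influence
# plot draws can also be pulled directly from the fitted model's influence object.
infl = prestige_model.get_influence()
diagnostics = pd.DataFrame({"studentized_resid": infl.resid_studentized_external,
                            "leverage": infl.hat_matrix_diag,
                            "cooks_d": infl.cooks_distance[0]},
                           index=prestige.index)
print(diagnostics.head())
print(diagnostics["cooks_d"].idxmax())  # observation with the largest Cook's distance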
#### Partial Regression Plots
# Since we are doing multivariate regressions, we cannot just look at individual bivariate plots to discern relationships. <br />
# Instead, we want to look at the relationship of the dependent variable and independent variables conditional on the other <br />
# independent variables. We can do this through using partial regression plots, otherwise known as added variable plots. <br />
#
# In a partial regression plot, to discern the relationship between the response variable and the $k$-th variable, we compute <br />
# the residuals by regressing the response variable versus the independent variables excluding $X_k$. We can denote this by <br />
# $X_{\sim k}$. We then compute the residuals by regressing $X_k$ on $X_{\sim k}$. The partial regression plot is the plot <br />
# of the former versus the latter residuals. <br />
#
# The notable points of this plot are that the fitted line has slope $\beta_k$ and intercept zero. The residuals of this plot <br />
# are the same as those of the least squares fit of the original model with full $X$. You can discern the effects of the <br />
# individual data values on the estimation of a coefficient easily. If obs_labels is True, then these points are annotated <br />
# with their observation label. You can also see the violation of underlying assumptions such as homoskedasticity and <br />
# linearity.
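# Numeric sketch of the claim above (the Frisch-Waugh-Lovell result): the slope of the
# residual-on-residual regression equals the full-model coefficient on income.
resid_y = ols("prestige ~ education", data=prestige).fit().resid
resid_x = ols("income ~ education", data=prestige).fit().resid
partial_fit = sm.OLS(resid_y, resid_x).fit()
print(partial_fit.params)                  # slope of the partial regression
print(prestige_model.params["income"])     # coefficient on income in the full model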
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.plot_partregress("prestige", "income", ["income", "education"], data=prestige, ax=ax)
ax = fig.axes[0]
ax.set_xlim(-2e-15, 1e-14)
ax.set_ylim(-25, 30);
fix, ax = plt.subplots(figsize=(12,14))
fig = sm.graphics.plot_partregress("prestige", "income", ["education"], data=prestige, ax=ax)
# As you can see the partial regression plot confirms the influence of conductor, minister, and RR.engineer on the partial relationship between income and prestige. The cases greatly decrease the effect of income on prestige. Dropping these cases confirms this.
subset = ~prestige.index.isin(["conductor", "RR.engineer", "minister"])
prestige_model2 = ols("prestige ~ income + education", data=prestige, subset=subset).fit()
print(prestige_model2.summary())
# For a quick check of all the regressors, you can use plot_partregress_grid. These plots will not label the <br />
# points, but you can use them to identify problems and then use plot_partregress to get more information.
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_partregress_grid(prestige_model, fig=fig)
#### Component-Component plus Residual (CCPR) Plots
# The CCPR plot provides a way to judge the effect of one regressor on the <br />
# response variable by taking into account the effects of the other <br />
# independent variables. The partial residuals plot is defined as <br />
# $\text{Residuals} + B_iX_i \text{ }\text{ }$ versus $X_i$. The component adds $B_iX_i$ versus <br />
# $X_i$ to show where the fitted line would lie. Care should be taken if $X_i$ <br />
# is highly correlated with any of the other independent variables. If this <br />
# is the case, the variance evident in the plot will be an underestimate of <br />
# the true variance.
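# Hand-computed partial residuals for education (a sketch of the quantity the CCPR
# plot below displays on the y-axis):
partial_resid = prestige_model.resid + prestige_model.params["education"] * prestige["education"]
print(partial_resid.head())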
fig, ax = plt.subplots(figsize=(12, 8))
fig = sm.graphics.plot_ccpr(prestige_model, "education", ax=ax)
# As you can see the relationship between the variation in prestige explained by education conditional on income seems to be linear, though you can see there are some observations that are exerting considerable influence on the relationship. We can quickly look at more than one variable by using plot_ccpr_grid.
fig = plt.figure(figsize=(12, 8))
fig = sm.graphics.plot_ccpr_grid(prestige_model, fig=fig)
#### Regression Plots
# The plot_regress_exog function is a convenience function that gives a 2x2 plot containing the dependent variable and fitted values with confidence intervals vs. the independent variable chosen, the residuals of the model vs. the chosen independent variable, a partial regression plot, and a CCPR plot. This function can be used for quickly checking modeling assumptions with respect to a single regressor.
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_regress_exog(prestige_model, "education", fig=fig)
#### Fit Plot
# The plot_fit function plots the fitted values versus a chosen independent variable. It includes prediction confidence intervals and optionally plots the true dependent variable.
fig, ax = plt.subplots(figsize=(12, 8))
fig = sm.graphics.plot_fit(prestige_model, "education", ax=ax)
### Statewide Crime 2009 Dataset
# Compare the following to http://www.ats.ucla.edu/stat/stata/webbooks/reg/chapter4/statareg_self_assessment_answers4.htm
#
# Though the data here is not the same as in that example. You could run that example by uncommenting the necessary cells below.
#dta = pd.read_csv("http://www.stat.ufl.edu/~aa/social/csv_files/statewide-crime-2.csv")
#dta = dta.set_index("State", inplace=True).dropna()
#dta.rename(columns={"VR" : "crime",
# "MR" : "murder",
# "M" : "pctmetro",
# "W" : "pctwhite",
# "H" : "pcths",
# "P" : "poverty",
# "S" : "single"
# }, inplace=True)
#
#crime_model = ols("murder ~ pctmetro + poverty + pcths + single", data=dta).fit()
dta = sm.datasets.statecrime.load_pandas().data
crime_model = ols("murder ~ urban + poverty + hs_grad + single", data=dta).fit()
print(crime_model.summary())
#### Partial Regression Plots
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_partregress_grid(crime_model, fig=fig)
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.plot_partregress("murder", "hs_grad", ["urban", "poverty", "single"], ax=ax, data=dta)
#### Leverage-Resid<sup>2</sup> Plot
# Closely related to the influence_plot is the leverage-resid<sup>2</sup> plot.
fig, ax = plt.subplots(figsize=(8,6))
fig = sm.graphics.plot_leverage_resid2(crime_model, ax=ax)
#### Influence Plot
fig, ax = plt.subplots(figsize=(8,6))
fig = sm.graphics.influence_plot(crime_model, ax=ax)
#### Using robust regression to correct for outliers.
# Part of the problem here in recreating the Stata results is that M-estimators are not robust to leverage points. MM-estimators should do better with this example.
from statsmodels.formula.api import rlm
rob_crime_model = rlm("murder ~ urban + poverty + hs_grad + single", data=dta,
M=sm.robust.norms.TukeyBiweight(3)).fit(conv="weights")
print(rob_crime_model.summary())
#rob_crime_model = rlm("murder ~ pctmetro + poverty + pcths + single", data=dta, M=sm.robust.norms.TukeyBiweight()).fit(conv="weights")
#print(rob_crime_model.summary())
# There aren't yet influence diagnostics as part of RLM, but we can recreate them. (This depends on the status of [issue #888](https://github.com/statsmodels/statsmodels/issues/808))
weights = rob_crime_model.weights
idx = weights > 0
X = rob_crime_model.model.exog[idx]
ww = weights[idx] / weights[idx].mean()
hat_matrix_diag = ww*(X*np.linalg.pinv(X).T).sum(1)
resid = rob_crime_model.resid
resid2 = resid**2
resid2 /= resid2.sum()
nobs = int(idx.sum())
hm = hat_matrix_diag.mean()
rm = resid2.mean()
from statsmodels.graphics import utils
fig, ax = plt.subplots(figsize=(12,8))
ax.plot(resid2[idx], hat_matrix_diag, 'o')
ax = utils.annotate_axes(range(nobs), labels=rob_crime_model.model.data.row_labels[idx],
points=lzip(resid2[idx], hat_matrix_diag), offset_points=[(-5,5)]*nobs,
size="large", ax=ax)
ax.set_xlabel("resid2")
ax.set_ylabel("leverage")
ylim = ax.get_ylim()
ax.vlines(rm, *ylim)
xlim = ax.get_xlim()
ax.hlines(hm, *xlim)
ax.margins(0,0)
| bsd-3-clause |
cogmission/nupic.research | projects/vehicle-control/agent/run_q.py | 12 | 5498 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from collections import defaultdict
import operator
import time
import numpy
from unity_client.server import Server
from sensorimotor.encoders.one_d_depth import OneDDepthEncoder
from sensorimotor.q_learner import QLearner
ACTIIONS = ["-1", "0", "1"]
class Agent(object):
  def __init__(self, positions):
self.encoder = OneDDepthEncoder(positions=positions,
radius=5,
wrapAround=True,
nPerPosition=28,
wPerPosition=3,
minVal=0,
maxVal=1)
self.plotter = Plotter(self.encoder)
self.learner = QLearner(ACTIIONS, n=1008)
self.lastState = None
self.lastAction = None
def sync(self, outputData):
if not ("ForwardsSweepSensor" in outputData and
"steer" in outputData):
print "Warning: Missing data:", outputData
return
if outputData.get("reset"):
print "Reset."
sensor = outputData["ForwardsSweepSensor"]
steer = outputData["steer"]
reward = outputData.get("reward") or 0
encoding = self.encoder.encode(numpy.array(sensor))
if self.lastState is not None:
self.learner.update(self.lastState, str(self.lastAction),
encoding, str(steer), reward)
value = self.learner.value(encoding)
qValues = {}
for action in ACTIIONS:
qValues[action] = self.learner.qValue(encoding, action)
inputData = {}
inputData["qValues"] = qValues
inputData["bestAction"] = self.learner.bestAction(encoding)
self.plotter.update(sensor, encoding, steer, reward, value, qValues)
if outputData.get("reset"):
self.plotter.render()
self.lastState = encoding
self.lastAction = steer
return inputData
class Plotter(object):
def __init__(self, encoder):
self.encoder = encoder
self.sensor = []
self.encoding = []
self.steer = []
self.reward = []
self.value = []
self.qValues = defaultdict(lambda: [])
self.bestAction = []
import matplotlib.pyplot as plt
self.plt = plt
import matplotlib.cm as cm
self.cm = cm
from pylab import rcParams
rcParams.update({'figure.figsize': (6, 9)})
# rcParams.update({'figure.autolayout': True})
rcParams.update({'figure.facecolor': 'white'})
def update(self, sensor, encoding, steer, reward, value, qValues):
self.sensor.append(sensor)
self.encoding.append(encoding)
self.steer.append(steer)
self.reward.append(reward)
self.value.append(value)
for key, value in qValues.iteritems():
self.qValues[key].append(value)
bestAction = int(max(qValues.iteritems(), key=operator.itemgetter(1))[0])
self.bestAction.append(bestAction)
def render(self):
self.plt.figure(1)
self.plt.clf()
n = 7
self.plt.subplot(n,1,1)
self._plot(self.steer, "Steer over time")
self.plt.subplot(n,1,2)
self._plot(self.reward, "Reward over time")
self.plt.subplot(n,1,3)
self._plot(self.value, "Value over time")
self.plt.subplot(n,1,4)
shape = len(self.encoder.positions), self.encoder.scalarEncoder.getWidth()
encoding = numpy.array(self.encoding[-1]).reshape(shape).transpose()
self._imshow(encoding, "Encoding at time t")
self.plt.subplot(n,1,5)
data = self.encoding
w = self.encoder.w
overlaps = [sum(a & b) / float(w) for a, b in zip(data[:-1], data[1:])]
self._plot(overlaps, "Encoding overlaps between consecutive times")
# for i, action in enumerate(ACTIIONS):
# self.plt.subplot(n,1,4+i)
# self._plot(self.qValues[action], "Q value: {0}".format(action))
# self.plt.subplot(n,1,7)
# self._plot(self.bestAction, "Best action")
self.plt.draw()
self.plt.savefig("q-{0}.png".format(time.time()))
def _plot(self, data, title):
self.plt.title(title)
self.plt.xlim(0, len(data))
self.plt.plot(range(len(data)), data)
def _imshow(self, data, title):
self.plt.title(title)
self.plt.imshow(data,
cmap=self.cm.Greys,
interpolation="nearest",
aspect='auto',
vmin=0,
vmax=1)
if __name__ == "__main__":
# complete uniform
# positions = [i*20 for i in range(36)]
# forward uniform
positions = [i*10 for i in range(-18, 18)]
agent = Agent(positions)
Server(agent)
| agpl-3.0 |
IndraVikas/scikit-learn | sklearn/linear_model/tests/test_base.py | 120 | 10082 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
| bsd-3-clause |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/linear_model/tests/test_logistic.py | 24 | 39507 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import compute_class_weight
from sklearn.utils.fixes import sp_version
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.model_selection import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,
multi_class='ovr', random_state=42)]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg', 'sag']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg, lbfgs"
" and sag solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
# only 'liblinear' solver
msg = "Solver liblinear does not support a multinomial backend."
lr = LR(solver='liblinear', multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs', 'sag']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg', 'sag']:
clf = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, max_iter=2000)
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,
random_state=0)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,
random_state=0)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4,
err_msg="with solver = %s" % solver)
# test for fit_intercept=True
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
Cs = [1e3]
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver,
intercept_scaling=10000., random_state=0)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000., random_state=0)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4,
err_msg="with solver = %s" % solver)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
# a feature vector of ones, i.e column of one vectors.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# The cv indices from stratified kfold (where stratification is done based
# on the fine-grained iris classes, i.e, before the classes 0 and 1 are
# conflated) is used for both clf and clf1
n_cv = 2
cv = StratifiedKFold(n_cv)
precomputed_folds = list(cv.split(train, target))
# Train clf on the original dataset where classes 0 and 1 are separated
clf = LogisticRegressionCV(cv=precomputed_folds)
clf.fit(train, target)
# Conflate classes 0 and 1 and train clf1 on this modified dataset
clf1 = LogisticRegressionCV(cv=precomputed_folds)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
# Ensure that what OvR learns for class2 is same regardless of whether
# classes 0 and 1 are separated or not
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, n_cv, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg', 'sag']:
max_iter = 100 if solver == 'sag' else 15
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=max_iter,
random_state=42, tol=1e-2, cv=2)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, n_cv, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
lib = LogisticRegression(fit_intercept=False)
sag = LogisticRegression(solver='sag', fit_intercept=False,
random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
tol = 1e-6
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol)
lib = LogisticRegression(fit_intercept=False, tol=tol)
sag = LogisticRegression(solver='sag', fit_intercept=False, tol=tol,
max_iter=1000, random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
msg = ("In LogisticRegressionCV the liblinear solver cannot handle "
"multiclass with class_weight of type dict. Use the lbfgs, "
"newton-cg or sag solvers or set class_weight='balanced'")
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raise_message(ValueError, msg, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
clf_sag = LogisticRegressionCV(solver='sag', fit_intercept=False,
class_weight='balanced', max_iter=2000)
clf_sag.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_sag.coef_, decimal=4)
def test_logistic_regression_sample_weights():
X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
n_classes=2, random_state=0)
sample_weight = y + 1
for LR in [LogisticRegression, LogisticRegressionCV]:
# Test that passing sample_weight as ones is the same as
# not passing them at all (default None)
for solver in ['lbfgs', 'liblinear']:
clf_sw_none = LR(solver=solver, fit_intercept=False)
clf_sw_none.fit(X, y)
clf_sw_ones = LR(solver=solver, fit_intercept=False)
clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(
clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)
# Test that sample weights work the same with the lbfgs,
# newton-cg, and 'sag' solvers
clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False)
clf_sw_lbfgs.fit(X, y, sample_weight=sample_weight)
clf_sw_n = LR(solver='newton-cg', fit_intercept=False)
clf_sw_n.fit(X, y, sample_weight=sample_weight)
clf_sw_sag = LR(solver='sag', fit_intercept=False, tol=1e-10)
# ignore convergence warning due to small dataset
with ignore_warnings():
clf_sw_sag.fit(X, y, sample_weight=sample_weight)
clf_sw_liblinear = LR(solver='liblinear', fit_intercept=False)
clf_sw_liblinear.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_liblinear.coef_, decimal=4)
# Test that passing class_weight as [1,2] is the same as
# passing class weight = [1,1] but adjusting sample weights
        # to be 2 for all instances of class 1
for solver in ['lbfgs', 'liblinear']:
clf_cw_12 = LR(solver=solver, fit_intercept=False,
class_weight={0: 1, 1: 2})
clf_cw_12.fit(X, y)
clf_sw_12 = LR(solver=solver, fit_intercept=False)
clf_sw_12.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(
clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
    # Test the above for l1 penalty and l2 penalty with dual=True,
    # since the patched liblinear code is different.
clf_cw = LogisticRegression(
solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
penalty="l1")
clf_cw.fit(X, y)
clf_sw = LogisticRegression(
solver="liblinear", fit_intercept=False, penalty="l1")
clf_sw.fit(X, y, sample_weight)
assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
clf_cw = LogisticRegression(
solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
penalty="l2", dual=True)
clf_cw.fit(X, y)
clf_sw = LogisticRegression(
solver="liblinear", fit_intercept=False, penalty="l2", dual=True)
clf_sw.fit(X, y, sample_weight)
assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
def _compute_class_weight_dictionary(y):
# helper for returning a dictionary instead of an array
classes = np.unique(y)
class_weight = compute_class_weight("balanced", classes, y)
class_weight_dict = dict(zip(classes, class_weight))
return class_weight_dict
def test_logistic_regression_class_weights():
# Multinomial case: remove 90% of class 0
X = iris.data[45:, :]
y = iris.target[45:]
solvers = ("lbfgs", "newton-cg")
class_weight_dict = _compute_class_weight_dictionary(y)
for solver in solvers:
clf1 = LogisticRegression(solver=solver, multi_class="multinomial",
class_weight="balanced")
clf2 = LogisticRegression(solver=solver, multi_class="multinomial",
class_weight=class_weight_dict)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=4)
# Binary case: remove 90% of class 0 and 100% of class 2
X = iris.data[45:100, :]
y = iris.target[45:100]
solvers = ("lbfgs", "newton-cg", "liblinear")
class_weight_dict = _compute_class_weight_dictionary(y)
for solver in solvers:
clf1 = LogisticRegression(solver=solver, multi_class="ovr",
class_weight="balanced")
clf2 = LogisticRegression(solver=solver, multi_class="ovr",
class_weight=class_weight_dict)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6)
def test_multinomial_logistic_regression_with_classweight_auto():
X, y = iris.data, iris.target
model = LogisticRegression(multi_class='multinomial',
class_weight='auto', solver='lbfgs')
# 'auto' is deprecated and will be removed in 0.19
assert_warns_message(DeprecationWarning,
"class_weight='auto' heuristic is deprecated",
model.fit, X, y)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
    # 'lbfgs' is used as a reference
solver = 'lbfgs'
ref_i = LogisticRegression(solver=solver, multi_class='multinomial')
ref_w = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
ref_i.fit(X, y)
ref_w.fit(X, y)
assert_array_equal(ref_i.coef_.shape, (n_classes, n_features))
assert_array_equal(ref_w.coef_.shape, (n_classes, n_features))
for solver in ['sag', 'newton-cg']:
clf_i = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, max_iter=1000, tol=1e-6)
clf_w = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, max_iter=1000, tol=1e-6,
fit_intercept=False)
clf_i.fit(X, y)
clf_w.fit(X, y)
assert_array_equal(clf_i.coef_.shape, (n_classes, n_features))
assert_array_equal(clf_w.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and the other solvers
assert_almost_equal(ref_i.coef_, clf_i.coef_, decimal=3)
assert_almost_equal(ref_w.coef_, clf_w.coef_, decimal=3)
assert_almost_equal(ref_i.intercept_, clf_i.intercept_, decimal=3)
    # Test that the path gives almost the same results. However, since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg', 'sag']:
clf_path = LogisticRegressionCV(solver=solver, max_iter=2000, tol=1e-6,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, ref_i.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, ref_i.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
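    # (To first order, grad(w + t * vec) ~ grad(w) + t * H @ vec, so fitting a
    # line of d_grad against d_x by least squares recovers the Hessian-vector
    # product H @ vec, i.e. the first Hessian column since vec = e_0.)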
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
def test_logreg_predict_proba_multinomial():
X, y = make_classification(n_samples=10, n_features=20, random_state=0,
n_classes=3, n_informative=10)
    # Predicted probabilities using the true-entropy loss should give a
# smaller loss than those using the ovr method.
clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
clf_multi.fit(X, y)
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
clf_ovr.fit(X, y)
clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
assert_greater(clf_ovr_loss, clf_multi_loss)
    # Predicted probabilities using the soft-max function should give a
# smaller loss than those using the logistic function.
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
assert_greater(clf_wrong_loss, clf_multi_loss)
@ignore_warnings
def test_max_iter():
    # Test that the maximum number of iterations is reached
X, y_bin = iris.data, iris.target.copy()
y_bin[y_bin == 2] = 0
solvers = ['newton-cg', 'liblinear', 'sag']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for max_iter in range(1, 5):
for solver in solvers:
for multi_class in ['ovr', 'multinomial']:
if solver == 'liblinear' and multi_class == 'multinomial':
continue
lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
multi_class=multi_class,
random_state=0, solver=solver)
lr.fit(X, y_bin)
assert_equal(lr.n_iter_[0], max_iter)
def test_n_iter():
# Test that self.n_iter_ has the correct format.
X, y = iris.data, iris.target
y_bin = y.copy()
y_bin[y_bin == 2] = 0
n_Cs = 4
n_cv_fold = 2
for solver in ['newton-cg', 'liblinear', 'sag', 'lbfgs']:
# OvR case
n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
clf = LogisticRegression(tol=1e-2, multi_class='ovr',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
n_classes = np.unique(y).shape[0]
clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
# multinomial case
n_classes = 1
if solver in ('liblinear', 'sag'):
break
clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
@ignore_warnings
def test_warm_start():
    # A 1-iteration second fit on the same data should give almost the same
    # result with warm starting, and a quite different result without it.
# Warm starting does not work with liblinear solver.
X, y = iris.data, iris.target
solvers = ['newton-cg', 'sag']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for warm_start in [True, False]:
for fit_intercept in [True, False]:
for solver in solvers:
for multi_class in ['ovr', 'multinomial']:
clf = LogisticRegression(tol=1e-4, multi_class=multi_class,
warm_start=warm_start,
solver=solver,
random_state=42, max_iter=100,
fit_intercept=fit_intercept)
clf.fit(X, y)
coef_1 = clf.coef_
clf.max_iter = 1
with ignore_warnings():
clf.fit(X, y)
cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
msg = ("Warm starting issue with %s solver in %s mode "
"with fit_intercept=%s and warm_start=%s"
% (solver, multi_class, str(fit_intercept),
str(warm_start)))
if warm_start:
assert_greater(2.0, cum_diff, msg)
else:
assert_greater(cum_diff, 2.0, msg)
| unlicense |
nsat/gnuradio | gr-digital/examples/berawgn.py | 32 | 4886 | #!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
BER simulation for QPSK signals, compare to theoretical values.
Change the N_BITS value to simulate more bits per Eb/N0 value,
thus allowing you to check for lower BER values.
Lower values will run faster; higher values will use a lot of RAM.
Also, this app isn't highly optimized--the flow graph is completely
reinstantiated for every Eb/N0 value.
Of course, expect the lowest BER you can reliably measure to be
roughly one order of magnitude above 1/N_BITS.
"""
import math
import numpy
from gnuradio import gr, digital
from gnuradio import analog
from gnuradio import blocks
import sys
try:
from scipy.special import erfc
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
# Best to choose powers of 10
N_BITS = 1e7
RAND_SEED = 42
def berawgn(EbN0):
""" Calculates theoretical bit error rate in AWGN (for BPSK and given Eb/N0) """
return 0.5 * erfc(math.sqrt(10**(float(EbN0)/10)))
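# Note: with a Gray-coded QPSK mapping (as is typical), the bit error rate
# versus Eb/N0 matches the BPSK expression above, so berawgn() also serves as
# the theoretical reference curve for the QPSK simulation below.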
class BitErrors(gr.hier_block2):
""" Two inputs: true and received bits. We compare them and
add up the number of incorrect bits. Because integrate_ff()
can only add up a certain number of values, the output is
not a scalar, but a sequence of values, the sum of which is
the BER. """
def __init__(self, bits_per_byte):
gr.hier_block2.__init__(self, "BitErrors",
gr.io_signature(2, 2, gr.sizeof_char),
gr.io_signature(1, 1, gr.sizeof_int))
# Bit comparison
comp = blocks.xor_bb()
intdump_decim = 100000
if N_BITS < intdump_decim:
intdump_decim = int(N_BITS)
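        # integrate_ff() sums blocks of intdump_decim error bits; multiplying
        # by 1.0/N_BITS below turns each accumulated error count into a BER
        # contribution, so the sink's values add up to the overall BER.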
self.connect(self,
comp,
blocks.unpack_k_bits_bb(bits_per_byte),
blocks.uchar_to_float(),
blocks.integrate_ff(intdump_decim),
blocks.multiply_const_ff(1.0/N_BITS),
self)
self.connect((self, 1), (comp, 1))
class BERAWGNSimu(gr.top_block):
" This contains the simulation flow graph "
def __init__(self, EbN0):
gr.top_block.__init__(self)
self.const = digital.qpsk_constellation()
# Source is N_BITS bits, non-repeated
data = map(int, numpy.random.randint(0, self.const.arity(), N_BITS/self.const.bits_per_symbol()))
src = blocks.vector_source_b(data, False)
mod = digital.chunks_to_symbols_bc((self.const.points()), 1)
add = blocks.add_vcc()
noise = analog.noise_source_c(analog.GR_GAUSSIAN,
self.EbN0_to_noise_voltage(EbN0),
RAND_SEED)
demod = digital.constellation_decoder_cb(self.const.base())
ber = BitErrors(self.const.bits_per_symbol())
self.sink = blocks.vector_sink_f()
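        # Wire up the graph: src -> mod -> add -> demod -> ber -> sink, with
        # the noise source on the adder's second input and the original bits
        # on the BER block's second input for comparison.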
self.connect(src, mod, add, demod, ber, self.sink)
self.connect(noise, (add, 1))
self.connect(src, (ber, 1))
def EbN0_to_noise_voltage(self, EbN0):
""" Converts Eb/N0 to a complex noise voltage (assuming unit symbol power) """
return 1.0 / math.sqrt(self.const.bits_per_symbol() * 10**(float(EbN0)/10))
def simulate_ber(EbN0):
""" All the work's done here: create flow graph, run, read out BER """
print "Eb/N0 = %d dB" % EbN0
fg = BERAWGNSimu(EbN0)
fg.run()
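    # The sink holds partial BER sums (see BitErrors); their total is the BER.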
return numpy.sum(fg.sink.data())
if __name__ == "__main__":
EbN0_min = 0
EbN0_max = 15
EbN0_range = range(EbN0_min, EbN0_max+1)
ber_theory = [berawgn(x) for x in EbN0_range]
print "Simulating..."
ber_simu = [simulate_ber(x) for x in EbN0_range]
f = pylab.figure()
s = f.add_subplot(1,1,1)
s.semilogy(EbN0_range, ber_theory, 'g-.', label="Theoretical")
s.semilogy(EbN0_range, ber_simu, 'b-o', label="Simulated")
s.set_title('BER Simulation')
s.set_xlabel('Eb/N0 (dB)')
s.set_ylabel('BER')
s.legend()
s.grid()
pylab.show()
| gpl-3.0 |
hrjn/scikit-learn | sklearn/metrics/tests/test_common.py | 19 | 43631 | from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import _named_check
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO Curves are currently not covered by invariance tests
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note to developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
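# For instance, a generic sample-order-invariance check can be written once
# over such a dictionary (a sketch; the real tests below use the shuffle and
# assert helpers imported above):
#
#     for name, metric in REGRESSION_METRICS.items():
#         p = np.random.permutation(len(y_true))
#         assert_almost_equal(metric(y_true, y_pred),
#                             metric(y_true[p], y_pred[p]))
#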
# Two types of datastructures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance to several input layouts.
#
REGRESSION_METRICS = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": partial(r2_score, multioutput='variance_weighted'),
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
"confusion_matrix": confusion_matrix,
"hamming_loss": hamming_loss,
"jaccard_similarity_score": jaccard_similarity_score,
"unnormalized_jaccard_similarity_score":
partial(jaccard_similarity_score, normalize=False),
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
"cohen_kappa_score": cohen_kappa_score,
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score,
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"macro_roc_auc": partial(roc_auc_score, average="macro"),
"average_precision_score": average_precision_score,
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"macro_average_precision_score":
partial(average_precision_score, average="macro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input arguments y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Those metrics don't support binary inputs
METRIC_UNDEFINED_BINARY = [
"samples_f0.5_score",
"samples_f1_score",
"samples_f2_score",
"samples_precision_score",
"samples_recall_score",
"coverage_error",
"roc_auc_score",
"micro_roc_auc",
"weighted_roc_auc",
"macro_roc_auc",
"samples_roc_auc",
"average_precision_score",
"weighted_average_precision_score",
"micro_average_precision_score",
"macro_average_precision_score",
"samples_average_precision_score",
"label_ranking_loss",
"label_ranking_average_precision_score",
]
# Those metrics don't support multiclass inputs
METRIC_UNDEFINED_MULTICLASS = [
"brier_score_loss",
"matthews_corrcoef_score",
# with default average='binary', multiclass is prohibited
"precision_score",
"recall_score",
"f1_score",
"f2_score",
"f0.5_score",
]
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_BINARY_MULTICLASS = set(METRIC_UNDEFINED_BINARY).union(
set(METRIC_UNDEFINED_MULTICLASS))
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = [
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
]
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = [
"roc_auc_score", "average_precision_score",
]
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = [
"roc_curve",
"brier_score_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
# pos_label support deprecated; to be removed in 0.18:
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that have a labels argument as well as a
# decision function argument, e.g. hinge_loss
METRICS_WITH_LABELS = [
"confusion_matrix",
"hamming_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"cohen_kappa_score",
]
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = [
"accuracy_score",
"jaccard_similarity_score",
"zero_one_loss",
]
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = [
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "macro_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"macro_average_precision_score",
"coverage_error", "label_ranking_loss",
]
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
]
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = [
"mean_absolute_error", "mean_squared_error", "r2_score",
"explained_variance_score"
]
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"f1_score", "micro_f1_score", "macro_f1_score",
"weighted_recall_score",
# P = R = F = accuracy in multiclass case
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error",
"cohen_kappa_score",
]
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = [
"explained_variance_score",
"r2_score",
"confusion_matrix",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss"
]
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = [
"confusion_matrix", # Left this one here because the tests in this file do
# not work for confusion_matrix, as its output is a
# matrix instead of a number. Testing of
# confusion_matrix with sample_weight is in
# test_classification.py
"median_absolute_error",
]
@ignore_warnings
def test_symmetry():
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
# We shouldn't forget any metrics
assert_equal(set(SYMMETRIC_METRICS).union(
NOT_SYMMETRIC_METRICS, THRESHOLDED_METRICS,
METRIC_UNDEFINED_BINARY_MULTICLASS),
set(ALL_METRICS))
assert_equal(
set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)),
set([]))
# Symmetric metric
for name in SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
# Not symmetric metrics
for name in NOT_SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)),
msg="%s seems to be symmetric" % name)
@ignore_warnings
def test_sample_order_invariance():
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_format_invariance_with_1d_vectors():
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_equal(y1_1d.ndim, 1)
assert_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
measure = metric(y1, y2)
assert_almost_equal(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant "
"with list" % name)
assert_almost_equal(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant "
"with np-array-1d" % name)
assert_almost_equal(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant "
"with np-array-column" % name)
# Mix format support
assert_almost_equal(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
        # These mixed representations aren't allowed
assert_raises(ValueError, metric, y1_1d, y2_row)
assert_raises(ValueError, metric, y1_row, y2_1d)
assert_raises(ValueError, metric, y1_list, y2_row)
assert_raises(ValueError, metric, y1_row, y2_list)
assert_raises(ValueError, metric, y1_column, y2_row)
assert_raises(ValueError, metric, y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTILABELS_METRICS)):
assert_raises(ValueError, metric, y1_row, y2_row)
@ignore_warnings
def test_invariance_string_vs_numbers_labels():
    # Ensure that classification metrics give the same results with string
    # labels as with numbers
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
for name, metric in CLASSIFICATION_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
for name, metric in THRESHOLDED_METRICS.items():
if name in ("log_loss", "hinge_loss", "unnormalized_log_loss",
"brier_score_loss"):
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_number = metric(y1, y2)
measure_with_str = metric_str(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
            # TODO: these metrics don't support string labels yet
assert_raises(ValueError, metric, y1_str, y2)
assert_raises(ValueError, metric, y1_str.astype('O'), y2)
def test_inf_nan_input():
    invalids = [([0, 1], [np.inf, np.inf]),
                ([0, 1], [np.nan, np.nan]),
                ([0, 1], [np.nan, np.inf])]
METRICS = dict()
METRICS.update(THRESHOLDED_METRICS)
METRICS.update(REGRESSION_METRICS)
for metric in METRICS.values():
for y_true, y_score in invalids:
assert_raise_message(ValueError,
"contains NaN, infinity",
metric, y_true, y_score)
# Classification metrics all raise a mixed input exception
for metric in CLASSIFICATION_METRICS.values():
for y_true, y_score in invalids:
assert_raise_message(ValueError,
"Can't handle mix of binary and continuous",
metric, y_true, y_score)
@ignore_warnings
def check_single_sample(name):
# Non-regression test: scores should work with a single sample.
# This is important for leave-one-out cross validation.
# Score functions tested are those that formerly called np.squeeze,
# which turns an array of size 1 into a 0-d array (!).
metric = ALL_METRICS[name]
# assert that no exception is thrown
for i, j in product([0, 1], repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
def test_single_sample():
for name in ALL_METRICS:
if (name in METRIC_UNDEFINED_BINARY_MULTICLASS or
name in THRESHOLDED_METRICS):
# Those metrics are not always defined with one sample
# or in multiclass classification
continue
yield check_single_sample, name
for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS:
yield check_single_sample_multioutput, name
def test_multioutput_number_of_output_differ():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_raises(ValueError, metric, y_true, y_pred)
def test_multioutput_regression_invariance_to_dimension_shuffling():
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling "
"invariant" % name)
@ignore_warnings
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
_, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples,
allow_unlabeled=True)
_, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples,
allow_unlabeled=True)
# To make sure at least one empty label is present
y1 = np.vstack([y1, [[0] * n_classes]])
y2 = np.vstack([y2, [[0] * n_classes]])
y1_sparse_indicator = sp.coo_matrix(y1)
y2_sparse_indicator = sp.coo_matrix(y2)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1, y2)
# Check representation invariance
assert_almost_equal(metric(y1_sparse_indicator,
y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance "
"between dense and sparse indicator "
"formats." % name)
def test_raise_value_error_multilabel_sequences():
# make sure the multilabel-sequence format raises ValueError
multilabel_sequences = [
[[0, 1]],
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
[[]],
[()],
np.array([[], [1, 2]], dtype='object')]
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
for seq in multilabel_sequences:
assert_raises(ValueError, metric, seq, seq)
def test_normalize_option_binary_classification(n_samples=20):
# Test in the binary case
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multiclasss_classification():
# Test in the multiclass case
random_state = check_random_state(0)
y_true = random_state.randint(0, 4, size=(20, ))
y_pred = random_state.randint(0, 4, size=(20, ))
n_samples = y_true.shape[0]
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multilabel_classification():
# Test in the multilabel case
n_classes = 4
n_samples = 100
    # for both random_state 0 and 1, y_true and y_pred have at least one
# unlabelled entry
_, y_true = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=0,
allow_unlabeled=True,
n_samples=n_samples)
_, y_pred = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=1,
allow_unlabeled=True,
n_samples=n_samples)
# To make sure at least one empty label is present
y_true += [0]*n_classes
y_pred += [0]*n_classes
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_array_almost_equal(label_measure,
[metric(y_true_binarize[:, i],
y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(),
y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_almost_equal(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, np.average(label_measure,
weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_almost_equal(sample_measure,
np.mean([metric(y_true_binarize[i],
y_pred_binarize[i])
for i in range(n_samples)]))
assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
def test_averaging_multiclass(n_samples=50, n_classes=3):
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
for name in METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
def test_averaging_multilabel(n_classes=5, n_samples=40):
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
def test_averaging_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
def test_averaging_multilabel_all_ones():
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
    # check that unit weights give the same score as no weight
unweighted_score = metric(y1, y2, sample_weight=None)
assert_almost_equal(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
assert_not_equal(
unweighted_score, weighted_score,
msg="Unweighted and weighted scores are unexpectedly "
"equal (%f) for %s" % (weighted_score, name))
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_almost_equal(
weighted_score, weighted_score_list,
err_msg=("Weighted scores for array and list "
"sample_weight input are not equal (%f != %f) for %s") % (
weighted_score, weighted_score_list, name))
    # check that integer weights give the same score as repeated samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_almost_equal(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_almost_equal(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%f != %f) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_almost_equal(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
    # Check that if sample_weight.shape[0] != y_true.shape[0], it raises an
# error
assert_raises(Exception, metric, y1, y2,
sample_weight=np.hstack([sample_weight, sample_weight]))
def test_sample_weight_invariance(n_samples=50):
random_state = check_random_state(0)
# regression
y_true = random_state.random_sample(size=(n_samples,))
y_pred = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if name not in REGRESSION_METRICS:
continue
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_pred
# binary
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if name in REGRESSION_METRICS:
continue
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_BINARY):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_score
else:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_pred
# multiclass
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
for name in ALL_METRICS:
if name in REGRESSION_METRICS:
continue
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_BINARY_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_score
else:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_pred
# multilabel indicator
_, ya = make_multilabel_classification(n_features=1, n_classes=20,
random_state=0, n_samples=100,
allow_unlabeled=False)
_, yb = make_multilabel_classification(n_features=1, n_classes=20,
random_state=1, n_samples=100,
allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTIOUTPUT_METRICS):
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (_named_check(check_sample_weight_invariance, name), name,
metric, y_true, y_score)
else:
yield (_named_check(check_sample_weight_invariance, name), name,
metric, y_true, y_pred)
@ignore_warnings
def test_no_averaging_labels():
# test labels argument when not using averaging
# in multi-class and multi-label cases
y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
y_true_multiclass = np.array([0, 1, 2])
y_pred_multiclass = np.array([0, 2, 3])
labels = np.array([3, 0, 1, 2])
_, inverse_labels = np.unique(labels, return_inverse=True)
for name in METRICS_WITH_AVERAGING:
for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
[y_true_multilabel, y_pred_multilabel]]:
if name not in MULTILABELS_METRICS and y_pred.ndim > 1:
continue
metric = ALL_METRICS[name]
score_labels = metric(y_true, y_pred, labels=labels, average=None)
score = metric(y_true, y_pred, average=None)
assert_array_equal(score_labels, score[inverse_labels])
| bsd-3-clause |
wdm0006/sklearn-extensions | examples/kernel_regression/example.py | 1 | 1546 | """
Example from: https://raw.githubusercontent.com/jmetzen/kernel_regression/master/plot_kernel_regression.py
========================================================================
Comparison of kernel regression (KR) and support vector regression (SVR)
========================================================================
Toy example of 1D regression using kernel regression (KR) and support vector
regression (SVR). KR provides an efficient way of selecting a kernel's
bandwidth via leave-one-out cross-validation, which is considerably faster
than an explicit grid-search as required by SVR. The main disadvantages are
that it does not support regularization and is not robust to outliers.
"""
print(__doc__)
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn_extensions.kernel_regression import KernelRegression
np.random.seed(0)
# Generate sample data
X = np.sort(5 * np.random.rand(100, 1), axis=0)
y = np.sin(X).ravel()
# Add noise to targets
y += 0.5 * (0.5 - np.random.rand(y.size))
# Fit regression models
svr = GridSearchCV(SVR(kernel='rbf'), cv=5, param_grid={"C": [1e-1, 1e0, 1e1, 1e2], "gamma": np.logspace(-2, 2, 10)})
kr = KernelRegression(kernel="rbf", gamma=np.logspace(-2, 2, 10))
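# Note: SVR cross-validates both C and gamma with 5-fold CV here, while KR
# only selects its bandwidth (gamma) by leave-one-out cross-validation, which
# is what the timing comparison printed below illustrates.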
t0 = time.time()
y_svr = svr.fit(X, y).predict(X)
print("SVR complexity and bandwidth selected and model fitted in %.3f s" % (time.time() - t0))
t0 = time.time()
y_kr = kr.fit(X, y).predict(X)
print("KR including bandwith fitted in %.3f s" % (time.time() - t0)) | bsd-3-clause |
srowen/spark | python/pyspark/pandas/tests/data_type_ops/test_binary_ops.py | 7 | 6774 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.tests.data_type_ops.testing_utils import TestCasesUtils
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class BinaryOpsTest(PandasOnSparkTestCase, TestCasesUtils):
@property
def pser(self):
return pd.Series([b"1", b"2", b"3"])
@property
def psser(self):
return ps.from_pandas(self.pser)
def test_add(self):
psser = self.psser
pser = self.pser
self.assert_eq(psser + b"1", pser + b"1")
self.assert_eq(psser + psser, pser + pser)
self.assert_eq(psser + psser.astype("bytes"), pser + pser.astype("bytes"))
self.assertRaises(TypeError, lambda: psser + "x")
self.assertRaises(TypeError, lambda: psser + 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser + psser)
self.assert_eq(self.psser + self.psser, self.pser + self.pser)
def test_sub(self):
self.assertRaises(TypeError, lambda: self.psser - "x")
self.assertRaises(TypeError, lambda: self.psser - 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser - psser)
def test_mul(self):
self.assertRaises(TypeError, lambda: self.psser * "x")
self.assertRaises(TypeError, lambda: self.psser * 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser * psser)
def test_truediv(self):
self.assertRaises(TypeError, lambda: self.psser / "x")
self.assertRaises(TypeError, lambda: self.psser / 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser / psser)
def test_floordiv(self):
self.assertRaises(TypeError, lambda: self.psser // "x")
self.assertRaises(TypeError, lambda: self.psser // 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser // psser)
def test_mod(self):
self.assertRaises(TypeError, lambda: self.psser % "x")
self.assertRaises(TypeError, lambda: self.psser % 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser % psser)
def test_pow(self):
self.assertRaises(TypeError, lambda: self.psser ** "x")
self.assertRaises(TypeError, lambda: self.psser ** 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser ** psser)
def test_radd(self):
self.assert_eq(b"1" + self.psser, b"1" + self.pser)
self.assertRaises(TypeError, lambda: "x" + self.psser)
self.assertRaises(TypeError, lambda: 1 + self.psser)
def test_rsub(self):
self.assertRaises(TypeError, lambda: "x" - self.psser)
self.assertRaises(TypeError, lambda: 1 - self.psser)
def test_rmul(self):
self.assertRaises(TypeError, lambda: "x" * self.psser)
self.assertRaises(TypeError, lambda: 2 * self.psser)
def test_rtruediv(self):
self.assertRaises(TypeError, lambda: "x" / self.psser)
self.assertRaises(TypeError, lambda: 1 / self.psser)
def test_rfloordiv(self):
self.assertRaises(TypeError, lambda: "x" // self.psser)
self.assertRaises(TypeError, lambda: 1 // self.psser)
def test_rmod(self):
self.assertRaises(TypeError, lambda: 1 % self.psser)
def test_rpow(self):
self.assertRaises(TypeError, lambda: "x" ** self.psser)
self.assertRaises(TypeError, lambda: 1 ** self.psser)
def test_and(self):
self.assertRaises(TypeError, lambda: self.psser & True)
self.assertRaises(TypeError, lambda: self.psser & False)
self.assertRaises(TypeError, lambda: self.psser & self.psser)
def test_rand(self):
self.assertRaises(TypeError, lambda: True & self.psser)
self.assertRaises(TypeError, lambda: False & self.psser)
def test_or(self):
self.assertRaises(TypeError, lambda: self.psser | True)
self.assertRaises(TypeError, lambda: self.psser | False)
self.assertRaises(TypeError, lambda: self.psser | self.psser)
def test_ror(self):
self.assertRaises(TypeError, lambda: True | self.psser)
self.assertRaises(TypeError, lambda: False | self.psser)
def test_from_to_pandas(self):
data = [b"1", b"2", b"3"]
pser = pd.Series(data)
psser = ps.Series(data)
self.assert_eq(pser, psser.to_pandas())
self.assert_eq(ps.from_pandas(pser), psser)
def test_isnull(self):
self.assert_eq(self.pser.isnull(), self.psser.isnull())
def test_astype(self):
pser = self.pser
psser = self.psser
self.assert_eq(pd.Series(["1", "2", "3"]), psser.astype(str))
self.assert_eq(pser.astype("category"), psser.astype("category"))
cat_type = CategoricalDtype(categories=[b"2", b"3", b"1"])
self.assert_eq(pser.astype(cat_type), psser.astype(cat_type))
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.data_type_ops.test_binary_ops import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
telefar/stockEye | coursera-compinvest1-master/coursera-compinvest1-master/homework/homework/homework5/HW5/hw5_bollinger.py | 3 | 1321 | ## Computational Investing I
## HW 5
##
## Author: alexcpsec
import pandas as pd
import pandas.stats.moments as pdsm
import numpy as np
import math
import copy
import QSTK.qstkutil.qsdateutil as du
import datetime as dt
import QSTK.qstkutil.DataAccess as da
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkstudy.EventProfiler as ep
def bollinger_bands(ldt_timestamps, ls_symbols, lookback):
dataobj = da.DataAccess('Yahoo')
ls_keys = ['close','actual_close']
ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
for s_key in ls_keys:
d_data[s_key] = d_data[s_key].fillna(method = 'ffill')
d_data[s_key] = d_data[s_key].fillna(method = 'bfill')
d_data[s_key] = d_data[s_key].fillna(1.0)
df_close = d_data['close']
df_mean = pd.rolling_mean(df_close, lookback)
df_std = pd.rolling_std(df_close, lookback)
    # band value = number of rolling standard deviations from the rolling mean
    df_bands = (df_close - df_mean) / df_std
    return df_bands
if __name__ == '__main__':
dt_start = dt.datetime(2010, 1, 1)
dt_end = dt.datetime(2010, 12, 31)
ls_symbols = ["AAPL", "GOOG", "IBM", "MSFT"]
lookback = 20
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt.timedelta(hours=16))
## Starting up with SP500 2008
bollinger_bands(ldt_timestamps, ls_symbols, lookback)
| bsd-3-clause |
costypetrisor/scikit-learn | sklearn/utils/tests/test_utils.py | 215 | 8100 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
# Check that shuffle does not try to convert to numpy arrays with float
# dtypes can let any indexable datastructure pass-through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| bsd-3-clause |
mfjb/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squares function. The penalty `shrinks` the
values of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
CTJChen/ctc_astropylib | mlematch.py | 1 | 23653 | import pandas as pd
from ctc_observ import *
from ctc_arrays import *
from scipy.interpolate import pchip
from statsmodels.nonparametric.smoothers_lowess import lowess
# load HSC catalog first
# hsc = pd.read_csv('/cuc36/xxl/multiwavelength/HSC/wide.csv')
def pdf_sep_gen(sep_arcsec, xposerr, opterr, pdf='Rayleigh'):
'''
    PDF of the angular separation between an X-ray source (positional error
    xposerr) and a source from the input catalog (positional error opterr).
    Defaults to a Rayleigh distribution; pass pdf='Gaussian' for a 2-D normal.
'''
if pdf == 'Gaussian':
# that was 2d-normal
poserr = 2 * (opterr ** 2 + xposerr ** 2) # this is 2*sigma^2
return np.exp(-sep_arcsec ** 2 / poserr) / (np.pi * poserr)
else:
poserr = (opterr ** 2 + xposerr ** 2)
return (sep_arcsec / poserr) * np.exp((-sep_arcsec ** 2) / poserr)
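# Minimal illustration (a sketch only, not used by the routines below):
# evaluate the Rayleigh separation PDF above on a grid of separations.
# The error values are hypothetical defaults chosen for the example.
def _example_pdf_sep_curve(xposerr=1.0, opterr=0.1):
    """Return (sep, pdf) for separations of 0-5 arcsec, Rayleigh form."""
    sep = np.linspace(0.0, 5.0, 51)  # separations in arcsec
    return sep, pdf_sep_gen(sep, xposerr, opterr, pdf='Rayleigh')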
def getbkgcat(xcat, catopt, optdf, r_in=7., r_out=35., magonly=False,
nmagbin=15, magname='imag_psf', ora='ra', odec='dec',
corr_glob=False, globonly=False):
'''
    Takes in xcat and catopt,
    finds optical sources whose separation from any X-ray source is
    between r_in and r_out (in arcsec),
    and derives the magnitude distribution of these background sources.
optdf = optdf_in.copy()
optdf.reset_index(inplace=True)
if len(catopt) != len(optdf):
print("catopt should be the astropy coordinate object computed from optdf!")
sys.exit(1)
'''
idhsc,idxmm,d2d,d3d=xcat.search_around_sky(catopt,r_in*u.arcsec)
#Excluding each optical source with an x-ray source within r_in
itmp=np.arange(len(catopt))
itmp[np.unique(idhsc)]=-1
#indicies for optical sources with **NO** X-ray counterparts within r_in
idhsc_ext=np.where(np.equal(optdf.index.values, itmp))[0]
#Now search for X-ray and optical matches within r_out
idhsc_in,idxmm,d2d,d3d=xcat.search_around_sky(catopt,r_out*u.arcsec)
idhsc_in = np.unique(idhsc_in)
#Cross-correlated the ``no r_in list'', and the ``r_out list''
#This will create a list of ``background optical sources''
idhsc_bkgd=np.intersect1d(idhsc_ext,idhsc_in)
hsc_bkgd=optdf.loc[idhsc_bkgd].copy()
hsc_bkgd.reset_index(inplace=True)
if magonly:
return hsc_bkgd[magname].values
else:
out,rmagbin=pd.cut(hsc_bkgd[magname].values,bins=nmagbin,retbins=True)
groups=hsc_bkgd.groupby(out)
#number density = total number of sources divided by the area of annulus
N_xmm=len(xcat) #number of unique XMM sources
N_bkgd=len(hsc_bkgd)
nm=groups[ora].count().values/(np.pi*(r_out**2-r_in**2)*N_xmm)
if corr_glob | globonly:
#According to Brusa et al. 2007, at faint magnitudes
#nm is not correct and should use a global one.
out,rmagbin_global=pd.cut(optdf[magname].values,bins=nmagbin,retbins=True)
groups=optdf.groupby(out)
rmag_global = binvalue(rmagbin_global)
area = \
(optdf[ora].max() - optdf[ora].min())*(optdf[odec].max() - optdf[odec].min())*3600**2
nm_global = groups[ora].count().values/area
iglobal = np.where(rmagbin > 23.)[0][:-1]
if corr_glob:
nm[iglobal] = nm_global[iglobal]
elif globonly:
return nm_global, rmagbin
return nm,rmagbin
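# Hedged usage sketch (assumes xcat/catopt are the astropy coordinate objects
# built from xdf/optdf, as required by the docstring above):
# >>> nm, rmagbin = getbkgcat(xcat, catopt, optdf, magname='imag_psf')
# >>> rmag_centres = binvalue(rmagbin)   # binvalue comes from ctc_arrays
# nm is the background surface density per magnitude bin
# (sources / arcsec^2 / X-ray source) measured in the r_in-r_out annulus,
# and rmagbin holds the magnitude bin edges.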
#def getqm(match,rmagbin, Q, NX, nm, r0=2.5):
def getqm(match,rmagbin, Q, nm, NX, r0=3.0):
'''
    Estimate q(m) -- the expected magnitude distribution of the true
    optical counterparts, evaluated at magnitude m.
'''
grp=match.groupby(pd.cut(match['rmag'].values,bins=rmagbin))
real_m=grp.rax.count().values# - np.pi*r0**2*NX*nm
real_m[np.where(real_m < 0.)] = \
0.1*nm[np.where(real_m < 0.)]*np.pi*NX*r0**2
qm = real_m*Q/np.sum(real_m)
rmagarr = np.array([])
qmarr = np.array([])
nmarr = np.array([])
for index, i in enumerate(rmagbin[:-1]):
rmagarr = np.hstack((rmagarr,np.linspace(i, rmagbin[index+1], 5)))
qmarr = np.hstack((qmarr, np.zeros(5) + qm[index]))
result = lowess(qmarr,rmagarr,frac=0.2)
x_smooth = result[:,0]
y_smooth = result[:,1]
return x_smooth, y_smooth, Q, qm#, real_m
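# In the form implemented above, q(m) is the histogram of candidate-match
# magnitudes rescaled so that it sums to Q, i.e.
#     q(m) = real_m(m) * Q / sum(real_m),
# and is then smoothed with a lowess fit before being interpolated in calc_LR.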
def calc_RCMAX(match, quntarr, Q,NX,LRfrac=0.2,first=False):
'''
R and C for a single LRthreshold value
'''
if type(NX) != float:
NX = float(NX)
LRth = quntarr
tmp = match[match.LR > LRth].copy().reset_index().drop('index',axis=1)
grp = tmp.groupby('xid')
#select sources with only one match
onematch = grp.filter(lambda x: len(x) == 1).copy()
onematch['Rc'] = onematch.LR.values/(onematch.LR.values + 1 - Q)
#these are sources with multiple matches
multimatch = tmp.loc[np.delete(tmp.index.values, onematch.index.values),:].reset_index().drop('index',axis=1)
onematch.reset_index(inplace=True)
nmx = tmp.xid.nunique() - onematch.xid.nunique()
if nmx == 0:
allmatch = onematch
elif nmx == 1:
multimatch['Rc'] = multimatch.LR/(multimatch.LR.sum() + (1-Q))
allmatch = pd.concat([onematch,multimatch],ignore_index=False)
else:
#regroup, and for each group only keep sources with LR larger than LRfrac*max(LR)
grp = multimatch.groupby('xid')
igood = grp.apply(lambda df:df.LR/df.LR.max() >= LRfrac).values
multimatch = multimatch[igood].reset_index().drop('index',axis=1)
grp = multimatch.groupby('xid')
multiRc = grp.apply(lambda df: df.LR/(df.LR.sum()+(1-Q))).values
multimatch['Rc'] = multiRc
allmatch = pd.concat([onematch,multimatch],ignore_index=False)
R = allmatch.Rc.mean()
C = allmatch.Rc.sum()/NX
return allmatch, R, C, LRth
def calc_RC(match, quntarr, Q,NX,LRfrac=0.2,first=False):
'''
    R and C for an array of LR-threshold values.
    If first==True, quntarr should contain values between 0 and 1, and the
    LR thresholds looped over are the corresponding quantiles,
    match.LR.quantile(quntarr).
    If quntarr is an array with length > 1 (values between 0 and 1),
    this subroutine finds the LRth value that maximizes C and R.
'''
if type(NX) != float:
NX = float(NX)
if first:
#if it's the first time, loop through the LR values in quantile arrays
#return R, C, LRth
LRth = match.LR.quantile(quntarr).values
print('first -- ', 'min/max LRth are ', np.min(LRth), np.max(LRth))
else:
LRth = quntarr
R = np.zeros(len(quntarr))
C = np.zeros(len(quntarr))
for index, lrthiter in enumerate(LRth):
tmp = match[match.LR > lrthiter].copy().reset_index().drop('index',axis=1)
grp = tmp.groupby('xid')
onematch = grp.filter(lambda x: len(x) == 1).copy() #select sources with only one match
#onematch.reset_index(inplace=True)
onematch['Rc'] = onematch.LR.values/(onematch.LR.values + 1 - Q)
#these are sources with multiple matches
multimatch = tmp.loc[np.delete(tmp.index.values, onematch.index.values),:].reset_index().drop('index',axis=1)
onematch.reset_index(inplace=True)
nmx = tmp.xid.nunique() - onematch.xid.nunique()
if nmx == 0:
#no x-ray sources have multiple good counterparts
allmatch = onematch
elif nmx == 1:
#only one x-ray sources have multiple good counterparts
multimatch['Rc'] = multimatch.LR/(multimatch.LR.sum() + (1-Q))
allmatch = pd.concat([onematch,multimatch],ignore_index=True)
else:
grp = multimatch.groupby('xid')
igood = grp.apply(lambda df:df.LR/df.LR.max() >= LRfrac).values
#dropping sources with LR < LRfrac*LRmax
multimatch = multimatch[igood].reset_index().drop('index',axis=1)
#regroup
grp = multimatch.groupby('xid')
multiRc = grp.apply(lambda df: df.LR/(df.LR.sum()+(1-Q))).values
multimatch['Rc'] = multiRc
allmatch = pd.concat([onematch,multimatch],ignore_index=False)
R[index] = allmatch.Rc.mean()
C[index] = allmatch.Rc.sum()/NX
return R, C, LRth
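# Reliability/completeness bookkeeping used by calc_RCMAX and calc_RC above:
# each accepted counterpart gets Rc = LR / (sum of LRs for that xid + (1 - Q)),
# R is the mean Rc over accepted matches and C = sum(Rc) / NX. A toy case:
# a single candidate with LR = 0.6 and Q = 0.8 has Rc = 0.6 / (0.6 + 0.2) = 0.75.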
def calc_LR(xdf, xcat, optdf,catopt,nm, qm, Q, rmag, NX,rsearch=5.0,\
lth = None, LRfrac=0.2,lrmax=None,\
magname = 'imag_psf',xerrname='xposerr',
xra = 'RA', xdec = 'DEC', ora = 'ra', odec = 'dec',
opticalid = 'hscid',opterr = 0.1,pdf='Rayleigh',first=False):
'''
    Input variables:
    xdf, xcat, optdf, catopt, nm, qm, Q, rmag, NX, rsearch=5.0,
    magname = 'imag_psf', xerrname='xposerr',
    xra = 'RA', xdec = 'DEC', ora = 'ra', odec = 'dec',
    opticalid = 'hscid'
    Computes LR for every optical source within rsearch arcsec of an
    X-ray source.
'''
if first:
print('first calc_LR')
idxmm, idhsc, d2d , d3d=catopt.search_around_sky(xcat,rsearch*u.arcsec)
match = pd.DataFrame({'xid':idxmm,'optid':idhsc,'dist':d2d.arcsec,\
'rmag':optdf.loc[idhsc,magname].values,'xposerr':xdf.loc[idxmm,xerrname],\
'raopt':optdf.loc[idhsc,ora].values,'decopt':optdf.loc[idhsc,odec].values,\
'rax':xdf.loc[idxmm,xra].values,'decx':xdf.loc[idxmm,xdec].values,\
'optname':optdf.loc[idhsc,opticalid].values})
#print('match len = ',len(match), 'xid nunique = ', match.xid.nunique())
fr = pdf_sep_gen(match.dist.values,match.xposerr.values,opterr,pdf=pdf)
n_m = pchip(rmag, nm)#, bounds_error=False,fill_value='extrapolate')
q_m = pchip(rmag, qm)#, bounds_error=False,fill_value='extrapolate')
fnm = n_m(match.rmag.values)
fqm = q_m(match.rmag.values)
fqm[np.where(fqm < 0.)] = 1e-8
fnm[np.where(fnm < 0.)] = 1e-8
LR = fr*fqm/fnm
match['LR'] = pd.Series(LR, index=match.index)
match['matchid'] = pd.Series(range(len(match)),index=match.index)
match['raoff'] = pd.Series((match.rax - match.raopt)*3600., index=match.index)
match['decoff'] = pd.Series((match.decx - match.decopt)*3600., index=match.index)
#several situations :
#1. all matches are unique, no further action is required.
if match.xid.nunique() - len(match) == 0:
return match, match, 1.0, 1.0, match.LR.min()
else:
if lth is None:
#If the array of lth values is not provided,
#guess it by assuming that only NX sources would be reliable,
#so loop through the LR values around that LR quantile
#qcenter = match.LR.quantile(float(NX)/len(match))
qcenter = 1. - 1.5*float(NX)/len(match)
if qcenter < 0.:
qcenter = 0.1
            lth = np.linspace(0.5*qcenter,
                              min([2.0*qcenter, 0.95]), 30)
#print(lth)
if lrmax is None:
#first
R, C, LRth = calc_RC(match, lth, Q, NX,LRfrac=LRfrac,first=first)
lthmax = LRth[np.argmax((R+C))]
if not np.isscalar(lthmax):
if len(lthmax) >= 1:
lthmax = lthmax[0]
goodmatch, R, C, LRth = calc_RCMAX(match,lthmax, Q, len(xcat),LRfrac=LRfrac)
return match, goodmatch, R, C, lthmax, LRth
else:
goodmatch, R, C, LRth = calc_RCMAX(match,lrmax, Q, len(xcat),LRfrac=LRfrac)
return match, goodmatch, R, C, lrmax, LRth
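# Worked toy example of the LR score computed above (numbers are invented):
# with f(r) = 0.15 (Rayleigh PDF at the observed separation),
# q(m) = 0.02 and n(m) = 0.005 at the candidate's magnitude,
#     LR = f(r) * q(m) / n(m) = 0.15 * 0.02 / 0.005 = 0.6,
# roughly the ratio of the probability of being the true counterpart to
# that of being a chance background interloper of that magnitude.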
def likmatch(xdf, xcat, optdf_in, catopt, radecerr = False, r0=2.5,rsearch=5.0, \
r_in = 7., r_out=35., lth = None,LRfrac=0.5,lrmax=None,\
nmagbin=15, niter=10,numid='numid',magname = 'imag_psf',xerrname='xposerr',\
xra = 'RA', xdec = 'DEC', ora = 'ra', odec = 'dec',\
opticalid = 'hscid',opterr=0.1,pdf='Rayleigh',verbose=True):
'''
Likelihood ratio based source matching.
    Currently based on the HSC public data release 1
(wide survey) in the XMM-LSS region.
Input: source list data frame or fits filename of the source lists.
See the input parameters for default column names
***Note that ``opticalid''
should be provided for each unique optical source
Default : xdf is in XMM SRCLIST format
optdf is for HSC.
Input parameters:
r0 - radius used for defining q(m)
    r_in and r_out - radii used for selecting background sources
    (optical sources whose distance from any X-ray source is larger than
    r_in and smaller than r_out are defined as background sources.)
if (len(catopt) != len(optdf)) or (len(xcat) != len(xdf)) :
print("x/opt catalogs should be the astropy coordinate objects computed from the dataframes!!")
sys.exit(1)
'''
optdf = optdf_in.copy(deep=True)
optdf.set_index(numid,inplace=True)
#making a copy for output
dfout = xdf.copy(deep=True)
dfout.reset_index(inplace=True)
#Background number surface density
nm, rmagbin = getbkgcat(xcat,catopt,optdf,r_in = r_in, r_out=r_out,
nmagbin=nmagbin, magname = magname,ora=ora,odec=odec)
if verbose:print('Calculating background mag. distribution, nm')
#nm = nm/np.sum(nm)
    #find the number of X-ray sources with at least one match within 1' (sample completeness)
idopt_r0,d2d,d3d=xcat.match_to_catalog_sky(catopt)#,1.0*u.arcmin)
NX = sum(d2d.arcmin <= 1.)
idopt_r0,idxmm,d2d,d3d=xcat.search_around_sky(catopt,r0*u.arcsec)
N1 = float(len(np.unique(idopt_r0)))
Q = N1/NX
print('Q = ', Q, ', N1 = ',N1, ' NX = ', NX)
if (N1 != float(len(idopt_r0))):
print('duplicated optical sources in qm calculation')
opt_qm = optdf.loc[idopt_r0,:]
grp=opt_qm.groupby(pd.cut(opt_qm[magname].values,bins=rmagbin))
total_m=grp[ora].count().values
real_m0=total_m-np.pi*r0**2*NX*nm
real_m0[np.where(real_m0 < 0.)] = 0.1*nm[np.where(real_m0 < 0.)]*np.pi*NX*r0**2
qm0 = real_m0*(Q/np.sum(real_m0))
rmagarr = np.array([])
qmarr = np.array([])
nmarr = np.array([])
for index, i in enumerate(rmagbin[:-1]):
rmagarr = np.hstack((rmagarr,np.linspace(i, rmagbin[index+1], 5)))
qmarr = np.hstack((qmarr, np.zeros(5) + qm0[index]))
nmarr = np.hstack((nmarr, np.zeros(5) + nm[index]))
result = lowess(qmarr,rmagarr,frac=0.2)
rmagsmooth = result[:,0]
qmsmooth = result[:,1]
result = lowess(nmarr,rmagarr,frac=0.2)
#rmagsmooth = result[:,0]
nmsmooth = result[:,1]
#for unrealistical qm values (<0), assuming the real counterpart distribution is the same
#as the background
#qm0[np.where(qm0 < 0.)] = nm[np.where(qm0 < 0.)]
rmag = rmagsmooth#binvalue(rmagbin)
if verbose:print('Calculating initial counterpart mag. dist., qm')
if verbose:print('Calculating background mag. distribution, rmag')
density_raw = pd.DataFrame({
'rmag':binvalue(rmagbin),
'qm0':qm0,
'nm':nm
}
)
density = pd.DataFrame({'rmag':rmag,'qm0':qmsmooth,'qms'+str(np.round(Q,2)):qmsmooth,'nm':nmsmooth})#,'real_ms':real_m0})
#With qm, nm, and Q, calculate the first match
if verbose:print('First LR matching')
match, goodmatch, R, C, lthmax, LRth = \
calc_LR(xdf, xcat, optdf,catopt,nmsmooth, qmsmooth, Q, rmag, NX, rsearch=rsearch,LRfrac=LRfrac,\
lth = lth,lrmax=lrmax, magname = magname,xerrname=xerrname,
xra = xra, xdec = xdec, ora = ora, odec = odec,
opticalid = opticalid,opterr=opterr,pdf=pdf,first=True)
if verbose:print('Q0='+str(Q), 'R0='+str(R),'C0='+str(C), len(goodmatch), lthmax)
#With the new ``matched sources'', recalculate qm again until C and R converges
if lrmax is None:
for i in range(niter):
if len(goodmatch) == 0:
print('No goodmatches (LRthreshold = ',lthmax,'), resetting to 0.4')
lthmax = 0.4
lth = np.sort(np.hstack((match.LR.quantile([0.1, 0.25, 0.5, 0.75, 0.9]).values, \
np.linspace(lthmax*0.5,lthmax*1.5,5))))
lthmax0 = lthmax * 1.
x_smooth, qm, Q, qmraw = getqm(goodmatch,rmagbin, C, nm, NX, r0 = r0)#, NX, nm)
match, goodmatch, R, C, lthmax, LRth = \
calc_LR(xdf, xcat, optdf,catopt,nmsmooth, qm, Q, rmag, NX, rsearch=rsearch,LRfrac=LRfrac,\
lth = lth, lrmax=lrmax , magname = magname,xerrname=xerrname,\
xra = xra, xdec = xdec, ora = ora, odec = odec,\
opticalid = opticalid,opterr=opterr,pdf=pdf, first=False)
density['qm'+str(i)+'_'+str(np.round(Q,2))] = pd.Series(qm,index=density.index)
density_raw['qm'+str(i)+'_'+str(np.round(Q,2))] = pd.Series(qmraw,index=density_raw.index)
#density['real_m'+str(i)] = pd.Series(real_m,index=density.index)
if verbose:print('R, C, len(goodmatch), LRth:' ,R, C, len(goodmatch),lthmax)
if verbose:print('Iter',i, 'new LRth = ', lthmax, 'old LRth =', lthmax0 )
if (np.abs(lthmax0 - lthmax) < 0.01) & (lthmax > 0.1) & (i >= 4):
if verbose:print('LR threshold converges, breaking now')
density['qmfinal'] = pd.Series(qm,index=density.index)
density_raw['qmfinal'] = pd.Series(qmraw,index=density_raw.index)
break
elif i == max(range(niter)):
density['qmfinal'] = pd.Series(qm,index=density.index)
density_raw['qmfinal'] = pd.Series(qmraw,index=density_raw.index)
return match,goodmatch, R, C, density, density_raw, lthmax, rmagbin
else:
match, goodmatch, R, C, lthmax, LRth = \
calc_LR(xdf, xcat, optdf,catopt,nmsmooth, qmsmooth, Q, rmag, NX, rsearch=rsearch,LRfrac=LRfrac,\
lth = lth,lrmax=lrmax, magname = magname,xerrname=xerrname,
xra = xra, xdec = xdec, ora = ora, odec = odec,
opticalid = opticalid,opterr=opterr,pdf=pdf)
x_smooth, qm, Q, qmraw = getqm(goodmatch,rmagbin, C, nm, NX, r0 = r0)
density['qmfinal'] = pd.Series(qm,index=density.index)
density_raw['qmfinal'] = pd.Series(qmraw,index=density_raw.index)
return match,goodmatch, R, C, density, density_raw, lthmax, rmagbin
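# Hedged end-to-end sketch (the catalogue variables are assumptions used only
# for illustration; argument names follow the signature above):
# >>> match, good, R, C, density, density_raw, lrth, rmagbin = likmatch(
# ...     xdf, xcat, optdf, catopt, magname='imag_psf', xerrname='xposerr')
# >>> good[['xid', 'optname', 'LR', 'Rc']].head()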
def likmatch_rerun(xdf, xcat, optdf_in, catopt, density, radecerr = False, r0=2.5,rsearch=5.0, \
r_in = 7., r_out=35., lth = np.linspace(0.05,0.9,10),LRfrac=0.2,lrmax=None,\
nmagbin=15, niter=10,numid='numid',magname = 'imag_psf',xerrname='xposerr',\
xra = 'RA', xdec = 'DEC', ora = 'ra', odec = 'dec',\
opticalid = 'hscid',opterr=0.1,pdf='Rayleigh',verbose=True,rc=False):
'''
    Similar to likmatch, but requires the `density` output from likmatch;
    useful for shift-and-rematch simulations.
'''
optdf = optdf_in.copy(deep=True)
optdf.set_index(numid,inplace=True)
NX = float(len(xcat))
idopt_r0,idxmm,d2d,d3d=xcat.search_around_sky(catopt,r0*u.arcsec)
N1 = float(len(np.unique(idopt_r0)))
Q = N1/NX
nm = density.nm.values
qm = density.qmfinal.values
rmag = density.rmag.values
match, goodmatch, R, C, lthmax, LRth = \
calc_LR(xdf, xcat, optdf,catopt,nm, qm, Q, rmag, NX, rsearch=rsearch,LRfrac=LRfrac,\
lth = lth,lrmax=lrmax, magname = magname,xerrname=xerrname,
xra = xra, xdec = xdec, ora = ora, odec = odec,
opticalid = opticalid,opterr=opterr,pdf=pdf)
if rc:
return match,goodmatch, R, C
else:
return match,goodmatch
def likmatch_ext(
xdf, xcat, optdf_in, catopt, density, r0=3.0, rsearch=10.0, \
r_in = 10., r_out=50., \
lth = None, LRfrac=0.5, lrmax=None, \
nmagbin=15, niter=10, numid='numid', magname = 'imag_psf',
xerrname='xposerr', xra = 'RA', xdec = 'DEC', \
ora = 'ra', odec = 'dec', opticalid = 'hscid',opterr=0.1, \
pdf='Rayleigh',verbose=True):
'''
Likelihood ratio based source matching.
    Different from the original likmatch function, this one requires an
    input array of true-counterpart magnitudes (via the `density` table),
    which is used directly as q(m).
    The background mag. distribution nm is read from the same table.
'''
optdf = optdf_in.copy(deep=True)
optdf.set_index(numid,inplace=True)
NX = float(len(xcat))
idopt_r0,idxmm,d2d,d3d=xcat.search_around_sky(catopt,r0*u.arcsec)
N1 = float(len(np.unique(idopt_r0)))
Q = N1/NX
nm = density.nm.values
qm = density.qmfinal.values
rmag = density.rmag.values
match, goodmatch, R, C, lthmax, LRth = \
calc_LR(xdf, xcat, optdf,catopt,nm, qm, Q, rmag, NX, rsearch=rsearch,LRfrac=LRfrac,\
lth = lth,lrmax=lrmax, magname = magname,xerrname=xerrname,
xra = xra, xdec = xdec, ora = ora, odec = odec,
opticalid = opticalid,opterr=opterr,pdf=pdf,first=True)
if verbose:print('Q0='+str(Q), 'R0='+str(R),'C0='+str(C), len(goodmatch), lthmax)
return match,goodmatch, R, C, lthmax
'''
#With the new ``matched sources'', recalculate qm again until C and R converges
if lrmax is None:
for i in range(niter):
if len(goodmatch) == 0:
print('No goodmatches (LRthreshold = ',lthmax,'), resetting to 0.4')
lthmax = 0.4
lth = np.sort(np.hstack((match.LR.quantile([0.1, 0.25, 0.5, 0.75, 0.9]).values, \
np.linspace(lthmax*0.5,lthmax*1.5,5))))
lthmax0 = lthmax * 1.
#qm, Q, real_m = getqm(goodmatch,rmagbin, C, nm, NX, r0 = r0)#, NX, nm)
match, goodmatch, R, C, lthmax, LRth = \
calc_LR(xdf, xcat, optdf,catopt,nm, qm, Q, rmag, rsearch=rsearch,LRfrac=LRfrac,\
lth = lth, lrmax=lrmax , magname = magname,xerrname=xerrname,\
xra = xra, xdec = xdec, ora = ora, odec = odec,\
opticalid = opticalid,opterr=opterr,pdf=pdf, first=False)
#density['qm'+str(i)+'_'+str(np.round(Q,2))] = pd.Series(qm,index=density.index)
#density['real_m'+str(i)] = pd.Series(real_m,index=density.index)
if verbose:print(R, C, len(goodmatch),lthmax)
if verbose:print('Iter',i, 'new LRth = ', lthmax, 'old LRth =', lthmax0 )
if (np.abs(lthmax0 - lthmax) < 0.01) & (lthmax > 0.1) & (i >= 4):
if verbose:print('LR threshold converges, breaking now')
#density['qmfinal'] = pd.Series(qm,index=density.index)
break
elif i == max(range(niter)):
print('max niter reached, should check convergence')
#density['qmfinal'] = pd.Series(qm,index=density.index)
return match,goodmatch, R, C, lthmax
else:
match, goodmatch, R, C, lthmax, LRth = \
calc_LR(xdf, xcat, optdf,catopt,nm, qm, Q, rmag, rsearch=rsearch,LRfrac=LRfrac,\
lth = lth,lrmax=lrmax, magname = magname,xerrname=xerrname,
xra = xra, xdec = xdec, ora = ora, odec = odec,
opticalid = opticalid,opterr=opterr,pdf=pdf)
#qm, Q, real_m = getqm(goodmatch,rmagbin, C, nm, NX, r0 = r0)
#density['qmfinal'] = pd.Series(qm,index=density.index)
return match,goodmatch, R, C, lthmax
'''
def finalmatch(match,goodmatch):
match.set_index(match.matchid.values,inplace=True)
mid_all = np.arange(len(match))
mid_all[goodmatch.matchid.values] = -1
badmatch = match.loc[mid_all[mid_all > 0],:]
#if an xid alread has a counterpart in goodmatch, drop it.
badmatch = badmatch[np.in1d(badmatch.xid.values, goodmatch.xid.unique(),invert=True)].copy()
badmatch.reset_index(inplace=True)
bad_ok = badmatch.drop_duplicates('xid',keep=False)
ibad = np.arange(len(badmatch))
ibad[bad_ok.index.values] = -1
bad_bad = badmatch.loc[np.where(ibad > -1)[0],:]
bad_bad.drop('index',axis=1,inplace=True)
okmatch = pd.concat([goodmatch, bad_ok])
return okmatch, bad_bad
| apache-2.0 |
swapnilgt/tablaPercPatternsISMIR | rlcs/run.py | 2 | 14062 | import os
import sys
from src import impl as rlcs
import utils as ut
import analysis as anls
import matplotlib.pyplot as plt
import logging
import pickle as pkl
import time
config = ut.loadConfig('config')
sylbSimFolder=config['sylbSimFolder']
transFolder=config['transFolder']
lblDir=config['lblDir']
onsDir=config['onsDir']
resultDir=config['resultDir']
sylbListFile=config['sylbListFile']
print sylbListFile
queryList = [['DHE','RE','DHE','RE','KI','TA','TA','KI','NA','TA','TA','KI','TA','TA','KI','NA'],['TA','TA','KI','TA','TA','KI','TA','TA','KI','TA','TA','KI','TA','TA','KI','TA'], ['TA','KI','TA','TA','KI','TA','TA','KI'], ['TA','TA','KI','TA','TA','KI'], ['TA', 'TA','KI', 'TA'],['KI', 'TA', 'TA', 'KI'], ['TA','TA','KI','NA'], ['DHA','GE','TA','TA']]
queryLenCheck = [4,6,8,16]
for query in queryList:
if len(query) not in queryLenCheck:
print 'The query is not of correct length!!'
sys.exit()
# Checking if we want to run for baseline or not.
baseline = False
if "baseline" in sys.argv:
baseline = True
#similarityListFile = os.path.join(sylbSimFolder,'simMatList.txt')
#similarityList = [line.strip().split('.')[0] for line in open(similarityListFile)]
# For correctness check
#similarityList = ['TablaDB_3_kmeans_mahal'] # looks promising ....
#similarityList = ['KLMonteCarlo-5','KLMonteCarlo-6', 'KLMonteCarlo-7'] # looks promising ....
#similarityList = ['KLGaussApprox-3', 'KLGaussApprox-4', 'KLGaussApprox-5', 'KLGaussApprox-6', 'KLGaussApprox-7'] # looks promising ....
similarityList = ['binaryDistance'] # results without similarity
ignrSimList = ['TablaDB_10_GMM_euclidean']
simObject = None
masterData = ut.getAllSylbData(tPath = transFolder, lblDir = lblDir, onsDir = onsDir)
#simDict = ut.getSimilarityDict('/home/swapnil/SMC/MasterThesis/sylbSimilarity/TablaDBstrokes','/home/swapnil/SMC/MasterThesis/sylbSimilarity/results_mat/TablaDB_6_kmeans_euclidean.mat')
def getAccuracies(payload, tres = 70.0, fp = None):
#fo.write('run.getAccuracies::')
totalRelvRetrieved = 0
totalRetrieved = 0
totalRelevant = 0
totalRelvRetrievedInGt = 0
ptrInTransLen = [] # List for the length of all the patterns that are the candidate patterns
for inst in payload:
retrieved = inst[0]
relevant = inst[1]
overlaps = inst[2]
retrievedInGT = inst[3] # We have keep this because there are multiple patterns at the same location of the GT (for recall)
if len(overlaps) != retrieved:
print 'The length of the two are not equal..'
print 'retrieved:' + str(retrieved)
print 'len(overlaps):' + str(len(overlaps))
sys.exit()
# Getting the ones that are relevant based on the threshold
relvInTrans, ptrLenInTransFoComp = anls.getRelevantTrans(overlaps, tres = tres)
totalRetrieved += retrieved
totalRelevant += relevant
totalRelvRetrieved += relvInTrans
totalRelvRetrievedInGt += retrievedInGT
ptrInTransLen.extend(ptrLenInTransFoComp)
fp.write('Total patterns retrieved in transcription:' + str(totalRetrieved) + '\n')
fp.write('Total patterns retrieved in ground truth:' + str(totalRelevant) + '\n')
fp.write('Total correct patterns retrieved :' + str(totalRelvRetrieved)+ '\n')
    fp.write('Number of ground-truth positions at which patterns were recovered :' + str(totalRelvRetrievedInGt)+ '\n')
# Return precision, recall
if totalRetrieved is not 0:
precision = (totalRelvRetrieved * 1.0/totalRetrieved) * 100
else:
precision = None
if totalRelevant is not 0:
recall = (totalRelvRetrievedInGt * 1.0/totalRelevant) * 100
else:
recall = None
return (precision, recall, ptrInTransLen)
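# As implemented above: precision = 100 * (correct retrievals in the
# transcription) / (all retrievals), while recall = 100 * (ground-truth
# positions recovered) / (all ground-truth occurrences); either value is
# None when its denominator is zero.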
def getUniqMatchesForAllComp(query, betaDir, beta, p, formula, baseline = False):
    ''' Takes the query, betaDir, beta, p and formula, and returns the list of unique matches (transcription and ground truth) for every composition in masterData.'''
if not os.path.exists(betaDir):
print 'The base director for logging does not exist.. Exiting' + str(betaDir)
sys.exit()
dataPerComp = []
for compData in masterData:
compName = compData[2]
fComp = open(os.path.join(betaDir,compName + '.log'), 'a')
if baseline is False:
uniqMatchesTrans = rlcs.getUniqMatches(compData[0][0], query, simObject, beta, p, fComp, formula)
else:
uniqMatchesTrans = rlcs.getBaselinePatterns(compData[0][0], query, fComp)
uniqMatchesGT = rlcs.getGTPatterns(compData[1][0], query, fComp)
dataPerComp.append((uniqMatchesTrans, uniqMatchesGT))
fComp.close()
return dataPerComp
def runOneIter(query, compMatchList, scoreTres, tresDir, overlapTres):
if not os.path.exists(tresDir):
print 'The base directory for logging does not exist.. Exiting' + str(tresDir)
sys.exit()
result = []
# List for storing the length of the patterns in the patterns searched (including false positives) in the transcribed score
ptrLenInTrans = []
for compData, compMatches in zip(masterData, compMatchList):
compName = compData[2]
fComp = open(os.path.join(tresDir,compName + '.log'), 'a')
transMatches = ut.getMatchesForTres(compMatches[0], scoreTres = scoreTres)
# getting the pattern length list and adding it to ptrLenInTrans
ptrLenInTrans.extend(ut.getPtrLenList([tup[1] for tup in transMatches]))
GTMatches = compMatches[1]
fComp.write('Running for the query:' + str(query) + '\n')
fComp.write('The matches in transcription that have score above the threshold are:' + str(transMatches) + '\n')
fComp.write('The matches in ground truth that have score above the threshold are:' + str(GTMatches) + '\n')
# Getting the onsets ..
origOnsets = compData[1][1]
transOnsets = compData[0][1]
#transOnsets = compData[1][1]
# Find the closest match of the transcribed pattern based on the time overlap ...
# TODO: Remove thses print statements later
#print 'transMatches=' + str(transMatches)
#print 'GTMatches=' + str(GTMatches)
overlaps, notFound = anls.getClosestPairs(transMatches, GTMatches, origOnsets, transOnsets, overlapTres)
# Get the length of the patterns in the overlaps
fComp.write('The overlaps are:' + str(overlaps) + '\n')
fComp.write('The patterns in ground truth that do not have any match:' + str(notFound) + '\n')
# Getting the transcription for the positions where there is no hit by the RLCS algorithm..
falseNegs = anls.getFalseNegativesInTranscription(compData[0][0], notFound, origOnsets, transOnsets)
fComp.write('The patterns in transcription which are not discovered by RLCS:' + str(falseNegs) + '\n')
fComp.write('The number of matches in transcription that have score above the threshold are:' + str(len(transMatches)) + '\n')
fComp.write('The number matches in ground truth that have score above the threshold are:' + str(len(GTMatches)) + '\n')
fComp.write('The number overlaps are:' + str(len(overlaps)) + '\n')
fComp.write('The patterns in ground truth that do not have any match:' + str(len(notFound)) + '\n')
fComp.write('The patterns in transcription which are not discovered by RLCS:' + str(len(falseNegs)) + '\n\n')
if len(transMatches) != len(overlaps):
print 'The size of the overlaps and the matches in the transcription are different.'
sys.exit()
result.append((len(transMatches), len(GTMatches), overlaps, len(GTMatches) - len(notFound)))
fComp.close()
fAll = open(os.path.join(tresDir,'master.log'), 'a')
fAll.write('Running for the query:' + str(query) + '\n')
precision, recall, ptrLenInTransOverlaps = getAccuracies(result, tres = overlapTres, fp = fAll)
fAll.write('The precision is:' + str(precision) + '\n')
fAll.write('The recall is:' + str(recall) + '\n\n')
fAll.close()
# Writing the result per query into the pkl file ..
ptrObj = (precision, recall, ptrLenInTransOverlaps, ptrLenInTrans) # added ptrLenInTrans to have array for the length of the patterns that have been retrieved
fPtrName = os.path.join(tresDir,'!'.join(query)+ '.pkl')
resFile = open(fPtrName, 'wb')
pkl.dump(ptrObj, resFile)
resFile.close()
return result
def runWithScoreTres(uniqMatchListPerQuery, queryList, betaDir, overlapTres):
scoreMinTres = 0.0
scoreMaxTres = 1.0
scoreStep = 0.05
if not os.path.exists(betaDir):
print 'The base directory for logging does not exist.. Exiting' + str(betaDir)
sys.exit()
result = []
scoreTres = scoreMinTres
while scoreTres <= scoreMaxTres:
resPerQuery = []
tresDir = os.path.join(betaDir,'scoreTres_' + str(scoreTres))
os.makedirs(tresDir)
for query, compMatchList in zip(queryList, uniqMatchListPerQuery):
resPerComp = runOneIter(query, compMatchList, scoreTres, tresDir, overlapTres)
resPerQuery.extend(resPerComp)
fAll = open(os.path.join(tresDir,'master.log'), 'a')
precision, recall, ptrLenInTrans = getAccuracies(resPerQuery, tres = overlapTres, fp = fAll)
fAll.write('The precision is:' + str(precision) + '\n')
fAll.write('The recall is:' + str(recall) + '\n')
fAll.close()
result.append((scoreTres,(precision, recall)))
scoreTres += scoreStep
# Dumping output to the pickle file
resFileName = os.path.join(betaDir, 'result.pkl')
resFile = open(resFileName, 'wb')
pkl.dump(result, resFile)
resFile.close()
# Plotting a graph and saving it
imgFile = os.path.join(betaDir, 'figure.png')
#plotRes(resFileName, 'tres', imgFile)
return result
def runWithBeta(queryList, pDir, betaMin, betaMax, betaStep, p, overlapTres, formula, baseline = False):
if not os.path.exists(pDir):
print 'The base directory for logging does not exist.. Exiting' + str(pDir)
sys.exit()
result = []
beta = betaMin
while(beta <= betaMax):
print '###### Running iteration for a new beta = '+ str(beta)+' #######'
uniqMatchListPerQuery = []
# Appending the feature type to the subDir name.
betaDir = 'beta_' + str(beta)
betaDir = os.path.join(pDir, betaDir)
os.makedirs(betaDir)
for query in queryList:
# Should get data from both the transcription and the ground truth...
uniqMatchDataPerQuery = getUniqMatchesForAllComp(query, betaDir, beta, p, formula, baseline = baseline)
uniqMatchListPerQuery.append(uniqMatchDataPerQuery)
resScore = runWithScoreTres(uniqMatchListPerQuery, queryList, betaDir, overlapTres)
result.append((beta, resScore))
beta += betaStep
# Dumping output to the pickle file
resFileName = os.path.join(pDir, 'result.pkl')
resFile = open(resFileName, 'wb')
pkl.dump(result, resFile)
resFile.close()
return result
def runWithAllParams(queryList, logDir, betaMin = 0.01, betaMax = 0.99, betaStep = 0.05, pMax = 1.0, pStep = 0.05, overlapTres = 70.0, formula = None, baseline = False):
result = []
#if not os.path.exists(logDir):
#print 'The directory for outputting results does not exist!! Exiting..'
#sys.exit()
timeStamp = time.strftime("%Y-%m-%d_%H-%M-%S",time.gmtime(time.time())) # get the current timestamp !
baseDir = os.path.join(logDir, timeStamp)
os.makedirs(baseDir) # Creating the directory with timestamp
# Getting the minimum p based on the
minLenQ = 100
for query in queryList:
if len(query) < minLenQ:
minLenQ = len(query)
p = 1.1 / minLenQ
if baseline is True:
p = 0.0
while p <= pMax:
print '$$$$$ Running iteration for a new p = '+ str(p)+' $$$$$'
pDir = os.path.join(baseDir, 'p_' + str(p))
os.makedirs(pDir) # Creating the directory for the p value
pResult = runWithBeta(queryList, pDir, betaMin = betaMin, betaMax = betaMax,\
betaStep = betaStep, p = p, overlapTres = overlapTres, formula = formula, baseline = baseline)
result.append((p, pResult))
p += pStep
# Dumping output to the pickle file
resFileName = os.path.join(baseDir, 'result.pkl')
resFile = open(resFileName, 'wb')
pkl.dump(result, resFile)
resFile.close()
return result
if __name__ == '__main__':
for similarity in similarityList:
if similarity in ignrSimList:
print 'Ignoring similarity..Going to next similarity measure!!'
continue
simDict = ut.getSimilarityDict(sylbListFile, os.path.join(sylbSimFolder, similarity + '.mat'))
#simDict = ut.readPickle(os.path.join('/home/swapnil/SMC/MasterThesis/gitPercPatterns/code/sylbSimilarity/sim', similarity + '.pkl'))
simTresMax = 0.9
simTresStep = 0.3
simTres = 0.3
# In case the baseline is true.
if baseline is True:
simTres = 0.9
print 'Running for similarity:' + similarity
formula = 2
while simTres <= simTresMax:
resultSimDir = os.path.join(resultDir, 'formula' + str(formula), similarity, similarity + '_' + str(simTres))
simObject = ut.Similarity(simDict, simTres) # second arguments is the threshold for the distance between the two sylbls.
if baseline is False:
runWithAllParams(queryList, resultSimDir, formula = formula, baseline = baseline)
else:
runWithAllParams(queryList, resultSimDir, formula = formula, betaMin = 0.0, betaMax = 0.0, pMax = 0.0, baseline = baseline)
print resultSimDir
print simObject.tres
simTres += simTresStep
| agpl-3.0 |
PythonProgramming/Support-Vector-Machines---Basics-and-Fundamental-Investing-Project | p26.py | 2 | 3329 | # back testing
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, preprocessing
import pandas as pd
from matplotlib import style
import statistics
from collections import Counter
style.use("ggplot")
how_much_better = 5
FEATURES = [
'DE Ratio',
'Trailing P/E',
'Price/Sales',
'Price/Book',
'Profit Margin',
'Operating Margin',
'Return on Assets',
'Return on Equity',
'Revenue Per Share',
'Market Cap',
'Enterprise Value',
'Forward P/E',
'PEG Ratio',
'Enterprise Value/Revenue',
'Enterprise Value/EBITDA',
'Revenue',
'Gross Profit',
'EBITDA',
'Net Income Avl to Common ',
'Diluted EPS',
'Earnings Growth',
'Revenue Growth',
'Total Cash',
'Total Cash Per Share',
'Total Debt',
'Current Ratio',
'Book Value Per Share',
'Cash Flow',
'Beta',
'Held by Insiders',
'Held by Institutions',
'Shares Short (as of',
'Short Ratio',
'Short % of Float',
'Shares Short (prior '
]
def Status_Calc(stock, sp500):
difference = stock - sp500
if difference > how_much_better:
return 1
else:
return 0
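# Labelling sketch: with how_much_better = 5, a stock whose percent change
# beats the S&P 500's by more than 5 percentage points is tagged 1
# ("outperform"), everything else 0. For example,
# Status_Calc(12.0, 4.0) -> 1 and Status_Calc(6.0, 4.0) -> 0.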
def Build_Data_Set():
# data_df = pd.DataFrame.from_csv("key_stats_acc_perf_WITH_NA_enhanced.csv")
data_df = pd.DataFrame.from_csv("key_stats_acc_perf_NO_NA_enhanced.csv")
# shuffle data:
data_df = data_df.reindex(np.random.permutation(data_df.index))
data_df = data_df.replace("NaN",0).replace("N/A",0)
# data_df = data_df.replace("NaN",-999).replace("N/A",-999)
data_df["Status2"] = list(map(Status_Calc, data_df["stock_p_change"], data_df["sp500_p_change"]))
X = np.array(data_df[FEATURES].values)#.tolist())
y = ( data_df["Status2"]
.replace("underperform",0)
.replace("outperform",1)
.values.tolist()
)
X = preprocessing.scale(X)
Z = np.array( data_df[ ["stock_p_change", "sp500_p_change"] ] )
return X,y,Z
def Analysis():
test_size = 1
invest_amount = 10000 # dollars
total_invests = 0
if_market = 0
if_strat = 0
X, y, Z = Build_Data_Set()
print(len(X))
clf = svm.SVC(kernel="linear", C=1.0)
clf.fit(X[:-test_size],y[:-test_size]) # train data
correct_count = 0
for x in range(1, test_size+1):
invest_return = 0
market_return = 0
if clf.predict(X[-x])[0] == y[-x]: # test data
correct_count += 1
if clf.predict(X[-x])[0] == 1:
invest_return = invest_amount + (invest_amount * (Z[-x][0] / 100.0))
market_return = invest_amount + (invest_amount * (Z[-x][1] / 100.0))
total_invests += 1
if_market += market_return
if_strat += invest_return
data_df = pd.DataFrame.from_csv("forward_sample_NO_NA.csv")
# data_df = pd.DataFrame.from_csv("forward_sample_WITH_NA.csv")
data_df = data_df.replace("NaN",0).replace("N/A",0)
X = np.array(data_df[FEATURES].values)
X = preprocessing.scale(X)
Z = data_df["Ticker"].values.tolist()
invest_list = []
for i in range(len(X)):
p = clf.predict(X[i])[0]
if p == 1:
# print(Z[i])
invest_list.append(Z[i])
# print(len(invest_list))
# print(invest_list)
return invest_list
# Analysis()
final_list = []
loops = 8
for x in range(loops):
stock_list = Analysis()
for e in stock_list:
final_list.append(e)
x = Counter(final_list)
print('_'*120)
for each in x:
if x[each] > loops - (loops/3):
print(each)
| mit |
adykstra/mne-python | mne/viz/tests/test_epochs.py | 1 | 8962 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Jaakko Leppakangas <jaeilepp@student.jyu.fi>
#
# License: Simplified BSD
import os.path as op
import numpy as np
import pytest
import matplotlib.pyplot as plt
from mne import (read_events, Epochs, pick_types, read_cov, create_info,
EpochsArray)
from mne.channels import read_layout
from mne.io import read_raw_fif
from mne.utils import run_tests_if_main
from mne.viz import plot_drop_log
from mne.viz.utils import _fake_click
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.1, 1.0
layout = read_layout('Vectorview-all')
def _get_epochs(stop=5, meg=True, eeg=False, n_chan=20):
"""Get epochs."""
raw = read_raw_fif(raw_fname)
events = read_events(event_name)
picks = pick_types(raw.info, meg=meg, eeg=eeg, stim=False,
ecg=False, eog=False, exclude='bads')
# Use a subset of channels for plotting speed
picks = np.round(np.linspace(0, len(picks) + 1, n_chan)).astype(int)
with pytest.warns(RuntimeWarning, match='projection'):
epochs = Epochs(raw, events[:stop], event_id, tmin, tmax, picks=picks,
proj=False)
epochs.info.normalize_proj() # avoid warnings
return epochs
def test_plot_epochs(capsys):
"""Test epoch plotting."""
epochs = _get_epochs().load_data()
assert len(epochs.events) == 1
epochs.info['lowpass'] = 10. # allow heavy decim during plotting
epochs.plot(scalings=None, title='Epochs')
plt.close('all')
# covariance / whitening
cov = read_cov(cov_fname)
assert len(cov['names']) == 366 # all channels
assert cov['bads'] == []
assert epochs.info['bads'] == [] # all good
with pytest.warns(RuntimeWarning, match='projection'):
epochs.plot(noise_cov=cov)
plt.close('all')
# add a channel to the epochs.info['bads']
epochs.info['bads'] = [epochs.ch_names[0]]
with pytest.warns(RuntimeWarning, match='projection'):
epochs.plot(noise_cov=cov)
plt.close('all')
# add a channel to cov['bads']
cov['bads'] = [epochs.ch_names[1]]
with pytest.warns(RuntimeWarning, match='projection'):
epochs.plot(noise_cov=cov)
plt.close('all')
# have a data channels missing from the covariance
cov['names'] = cov['names'][:306]
cov['data'] = cov['data'][:306][:306]
with pytest.warns(RuntimeWarning, match='projection'):
epochs.plot(noise_cov=cov)
plt.close('all')
# other options
fig = epochs[0].plot(picks=[0, 2, 3], scalings=None)
fig.canvas.key_press_event('escape')
plt.close('all')
keystotest = ['b', 'b', 'left', 'right', 'up', 'down',
'pageup', 'pagedown', '-', '+', '=',
'f11', 'home', '?', 'h', 'o', 'end']
fig = epochs.plot()
with pytest.warns(None): # sometimes matplotlib warns about limits
for key in keystotest:
fig.canvas.key_press_event(key)
fig.canvas.scroll_event(0.5, 0.5, -0.5) # scroll down
fig.canvas.scroll_event(0.5, 0.5, 0.5) # scroll up
fig.canvas.resize_event()
fig.canvas.close_event() # closing and epoch dropping
plt.close('all')
pytest.raises(ValueError, epochs.plot, picks=[])
plt.close('all')
fig = epochs.plot(events=epochs.events)
# test mouse clicks
data_ax = fig.get_axes()[0]
x = data_ax.get_xlim()[1] / 2
y = data_ax.get_ylim()[0] / 2
n_epochs = len(epochs)
_fake_click(fig, data_ax, [x, y], xform='data') # mark a bad epoch
_fake_click(fig, data_ax, [x, y], xform='data') # unmark a bad epoch
_fake_click(fig, data_ax, [0.5, 0.999]) # click elsewhere in 1st axes
_fake_click(fig, data_ax, [-0.1, 0.9]) # click on y-label
_fake_click(fig, data_ax, [-0.1, 0.9], button=3)
_fake_click(fig, fig.get_axes()[2], [0.5, 0.5]) # change epochs
_fake_click(fig, fig.get_axes()[3], [0.5, 0.5]) # change channels
fig.canvas.close_event() # closing and epoch dropping
assert(n_epochs - 1 == len(epochs))
plt.close('all')
epochs.plot_sensors() # Test plot_sensors
plt.close('all')
# gh-5906
epochs = _get_epochs(None).load_data()
epochs.load_data()
assert len(epochs) == 7
epochs.info['bads'] = [epochs.ch_names[0]]
capsys.readouterr()
fig = epochs.plot(n_epochs=3)
data_ax = fig.get_axes()[0]
_fake_click(fig, data_ax, [-0.1, 0.9]) # click on y-label
fig.canvas.key_press_event('right') # move right
x = fig.get_axes()[0].get_xlim()[1] / 6.
y = fig.get_axes()[0].get_ylim()[0] / 2
_fake_click(fig, data_ax, [x, y], xform='data') # mark a bad epoch
fig.canvas.key_press_event('left') # move back
out, err = capsys.readouterr()
assert 'out of bounds' not in out
assert 'out of bounds' not in err
fig.canvas.close_event()
assert len(epochs) == 6
plt.close('all')
def test_plot_epochs_image():
"""Test plotting of epochs image."""
epochs = _get_epochs()
epochs.plot_image(picks=[1, 2])
epochs.plot_image(picks='mag')
overlay_times = [0.1]
epochs.plot_image(picks=[1], order=[0], overlay_times=overlay_times,
vmin=0.01, title="test"
)
epochs.plot_image(picks=[1], overlay_times=overlay_times, vmin=-0.001,
vmax=0.001)
pytest.raises(ValueError, epochs.plot_image,
picks=[1], overlay_times=[0.1, 0.2])
pytest.raises(ValueError, epochs.plot_image,
picks=[1], order=[0, 1])
pytest.raises(ValueError, epochs.plot_image, axes=dict(), group_by=list(),
combine='mean')
pytest.raises(ValueError, epochs.plot_image, axes=list(), group_by=dict(),
combine='mean')
pytest.raises(ValueError, epochs.plot_image, group_by='error',
picks=[1, 2])
pytest.raises(ValueError, epochs.plot_image, units={"hi": 1},
scalings={"ho": 1})
assert len(epochs.plot_image(picks=["eeg", "mag", "grad"])) < 6
epochs.load_data().pick_types(meg='mag')
epochs.info.normalize_proj()
epochs.plot_image(group_by='type', combine='mean')
epochs.plot_image(group_by={"1": [1, 2], "2": [1, 2]}, combine='mean')
epochs.plot_image(vmin=lambda x: x.min())
pytest.raises(ValueError, epochs.plot_image, axes=1, fig=2)
ts_args = dict(show_sensors=False)
with pytest.warns(RuntimeWarning, match='fall outside'):
epochs.plot_image(overlay_times=[1.1], combine="gfp", ts_args=ts_args)
pytest.raises(ValueError, epochs.plot_image, combine='error',
ts_args=ts_args)
with pytest.raises(NotImplementedError, match='currently'):
epochs.plot_image(ts_args=dict(invert_y=True))
plt.close('all')
def test_plot_drop_log():
"""Test plotting a drop log."""
epochs = _get_epochs()
pytest.raises(ValueError, epochs.plot_drop_log)
epochs.drop_bad()
epochs.plot_drop_log()
plot_drop_log([['One'], [], []])
plot_drop_log([['One'], ['Two'], []])
plot_drop_log([['One'], ['One', 'Two'], []])
plt.close('all')
def test_plot_butterfly():
"""Test butterfly view in epochs browse window."""
rng = np.random.RandomState(0)
n_epochs, n_channels, n_times = 50, 30, 20
sfreq = 1000.
data = np.sin(rng.randn(n_epochs, n_channels, n_times))
events = np.array([np.arange(n_epochs), [0] * n_epochs, np.ones([n_epochs],
dtype=np.int)]).T
chanlist = ['eeg' if chan < n_channels // 3 else 'ecog'
if chan < n_channels // 2 else 'seeg'
for chan in range(n_channels)]
info = create_info(n_channels, sfreq, chanlist)
epochs = EpochsArray(data, info, events)
fig = epochs.plot(butterfly=True)
keystotest = ['b', 'b', 'left', 'right', 'up', 'down',
'pageup', 'pagedown', '-', '+', '=',
'f11', 'home', '?', 'h', 'o', 'end']
for key in keystotest:
fig.canvas.key_press_event(key)
fig.canvas.scroll_event(0.5, 0.5, -0.5) # scroll down
fig.canvas.scroll_event(0.5, 0.5, 0.5) # scroll up
fig.canvas.resize_event()
fig.canvas.close_event() # closing and epoch dropping
plt.close('all')
def test_plot_psd_epochs():
"""Test plotting epochs psd (+topomap)."""
epochs = _get_epochs()
epochs.plot_psd()
pytest.raises(RuntimeError, epochs.plot_psd_topomap,
bands=[(0, 0.01, 'foo')]) # no freqs in range
epochs.plot_psd_topomap()
plt.close('all')
run_tests_if_main()
| bsd-3-clause |
devincornell/semanticanlysis | dictionary.py | 1 | 5447 | import spacy
from .parallel import *
import re
import functools
#from .topicmodel import TopicModel
''' This module defines DictionaryParser, which scores documents against dictionaries in the
MFD (.dic) format; the empath, MFT, and MFT 2.0 dictionaries are available in this format.
For further information about the format of the dictionary files, see Haidt et al.'s example:
http://www.moralfoundations.org/sites/default/files/files/downloads/moral%20foundations%20dictionary.dic
'''
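# A minimal sketch of a .dic file in this format (illustrative only; the category
# numbers and labels below are hypothetical, not taken from any shipped dictionary):
#   %
#   01  care.virtue
#   02  care.vice
#   %
#   compassion  01
#   harm  02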
class DictionaryParser(object):
### VVVVVVVVVVVV SKLEARN STUFF VVVVVVVVVVVVVV ###
def __init__(self, **params):
self.set_params(**params)
    def get_params(self, deep=True):
return self.params
def set_params(self, **params):
self.params = params
self.dictfile = params['dictfile'] # required: dict file
self.workers = params['workers'] if 'workers' in params.keys() else 1
self.verb = params['verbose'] if 'verbose' in params.keys() else False
self.nlp = spacy.load(params['lang']) if 'lang' in params.keys() else spacy.load('en')
def fit(self, X, y):
# pretending to be sklearn library
return self
### ^^^^^^^^^^^^ SKLEARN STUFF ^^^^^^^^^^^^^^ ###
def __init__(self, **params):
''' Sets self.dictionary (word:category) and self.dictionary_regex (word:regex_pattern) '''
self.set_params(**params)
# read dict file (sometimes .dic): automatically reads nummap and values
self.nummap = dict()
self.dictionary = dict()
self.dictionary_regex = dict()
wordmode = True
with open(self.dictfile, 'r') as f:
for line in f.readlines():
ent = line.strip().split()
if line[0] == '%':
wordmode = not wordmode
elif len(ent) > 0:
if wordmode:
wordkey = ''.join([e for e in ent if e not in self.nummap.keys()])
self.dictionary[wordkey] = [self.nummap[e] for e in ent if e in self.nummap.keys()]
else:
self.nummap[ent[0]] = ent[1]
self.category_words = {c:list() for c in self.nummap.values()}
for w,cats in self.dictionary.items():
for cat in cats:
self.category_words[cat].append(w)
# convenient class members
self.categories = list(self.nummap.values())
def score_categories(self, X, categories=None, summed=False):
'''X is a list of bow (bow in list/set format) (used traditional ml notation)'''
def worker(x, dat=None):
            ''' Worker function. In single-core mode ``x`` is a ``(bow, dat)`` tuple and
                ``dat`` is None; in parallel mode ``dat`` holds the dictionary,
                dictionary_regex, and category list shared across workers. '''
# parallel/nonparallel details
if dat is None:
dat = x[1]
x = x[0]
# unpack static data
dictionary, dictionary_regex, categories = dat['dictionary'], dat['dictionary_regex'], dat['cats']
# score foundations
dist = {c:0 for c in categories}
for w,value_cats in dictionary.items(): # dict words
                if w in x: # exact membership check in the bow: cheaper than regex
for cat in value_cats:
if cat in categories: # do I really need this?
dist[cat] += 1
return dist
if categories is None:
categories = self.categories
#if self.verb: print('using spacy to parse for prediction')
#X = [[w.text for w in x] for x in self.nlp.pipe(X, disable=['ner', 'textcat', 'parser'], n_threads=self.workers)]
dat = {'dictionary':self.dictionary, 'dictionary_regex':self.dictionary_regex, 'cats':categories}
if self.workers > 1:
if self.verb: print('Running prediction with', self.workers, 'cores.')
dists = parmap(worker, X, dat, workers=self.workers)
else:
if self.verb: print('Running prediction on single core.')
dists = list(map(worker, [(x,dat) for x in X]))
if summed: # sum up counts across all inputs
combine = lambda x,y: {k:x[k]+y[k] for k in x.keys()}
catsum = functools.reduce(combine, dists)
return catsum
else:
return dists
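    # Usage sketch (file name and tokens are hypothetical illustrations):
    #   parser = DictionaryParser(dictfile='mft.dic', workers=1, verbose=False)
    #   counts = parser.score_categories([['care', 'harm'], ['fairness', 'cheat']],
    #                                    summed=True)
    # ``counts`` maps each category label from the .dic header to a total count.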
'''
def predict(self, X):
yhat = list()
for score in scores:
mftvalues = {c:0 for c in self.basenames.values()}
for c in self.basenames.keys():
if c != 'MoralityGeneral':
#print('shitter', self.basenames[c])
mftvalues[self.basenames[c]] += score[c]
fscores = [(k,v) for k,v in mftvalues.items()]
sortbf = list(sorted(fscores, key=lambda x:x[1], reverse=True))
yhat.append( [f[0] for f in sortbf if f[1] == sortbf[0][1] and f[1] > 0])
#scores = parmap(scores, mft_base_old)
return yhat
def score(self, X, y):
y = list(y) # avoid weird indexing things if y is series/df
yhat = self.predict(X)
# currently set to check prediction level
return np.mean([(1 if y[i] in yh else 0) for i,yh in enumerate(yhat)])
'''
| mit |
adykstra/mne-python | mne/channels/channels.py | 1 | 52814 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
# Andrew Dykstra <andrew.r.dykstra@gmail.com>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
import sys
import numpy as np
from scipy import sparse
from ..utils import (verbose, logger, warn, copy_function_doc_to_method_doc,
_check_preload, _validate_type, fill_doc, _check_option)
from ..io.compensator import get_current_comp
from ..io.constants import FIFF
from ..io.meas_info import anonymize_info, Info
from ..io.pick import (channel_type, pick_info, pick_types, _picks_by_type,
_check_excludes_includes, _contains_ch_type,
channel_indices_by_type, pick_channels, _picks_to_idx)
def _get_meg_system(info):
"""Educated guess for the helmet type based on channels."""
have_helmet = True
for ch in info['chs']:
if ch['kind'] == FIFF.FIFFV_MEG_CH:
# Only take first 16 bits, as higher bits store CTF grad comp order
coil_type = ch['coil_type'] & 0xFFFF
if coil_type == FIFF.FIFFV_COIL_NM_122:
system = '122m'
break
elif coil_type // 1000 == 3: # All Vectorview coils are 30xx
system = '306m'
break
elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or
coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD):
nmag = np.sum([c['kind'] == FIFF.FIFFV_MEG_CH
for c in info['chs']])
system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh'
break
elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD:
system = 'CTF_275'
break
elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD:
system = 'KIT'
break
elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD:
system = 'BabySQUID'
break
elif coil_type == FIFF.FIFFV_COIL_ARTEMIS123_GRAD:
system = 'ARTEMIS123'
have_helmet = False
break
else:
system = 'unknown'
have_helmet = False
return system, have_helmet
def _get_ch_type(inst, ch_type, allow_ref_meg=False):
"""Choose a single channel type (usually for plotting).
Usually used in plotting to plot a single datatype, e.g. look for mags,
then grads, then ... to plot.
"""
if ch_type is None:
allowed_types = ['mag', 'grad', 'planar1', 'planar2', 'eeg']
allowed_types += ['ref_meg'] if allow_ref_meg else []
for type_ in allowed_types:
if isinstance(inst, Info):
if _contains_ch_type(inst, type_):
ch_type = type_
break
elif type_ in inst:
ch_type = type_
break
else:
raise RuntimeError('No plottable channel types found')
return ch_type
@verbose
def equalize_channels(candidates, verbose=None):
"""Equalize channel picks for a collection of MNE-Python objects.
Parameters
----------
candidates : list
list Raw | Epochs | Evoked | AverageTFR
%(verbose)s
Notes
-----
This function operates inplace.
"""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
from ..evoked import Evoked
from ..time_frequency import _BaseTFR
for candidate in candidates:
_validate_type(candidate,
(BaseRaw, BaseEpochs, Evoked, _BaseTFR),
"Instances to be modified",
"Raw, Epochs, Evoked or TFR")
chan_max_idx = np.argmax([c.info['nchan'] for c in candidates])
chan_template = candidates[chan_max_idx].ch_names
logger.info('Identifying common channels ...')
channels = [set(c.ch_names) for c in candidates]
common_channels = set(chan_template).intersection(*channels)
dropped = list()
for c in candidates:
drop_them = list(set(c.ch_names) - common_channels)
if drop_them:
c.drop_channels(drop_them)
dropped.extend(drop_them)
if dropped:
dropped = list(set(dropped))
logger.info('Dropped the following channels:\n%s' % dropped)
else:
        logger.info('All channels match; nothing to do.')
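# Usage sketch (assumes ``raw`` and ``epochs`` are already-loaded MNE objects):
#   equalize_channels([raw, epochs])   # both are modified in place to share channels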
class ContainsMixin(object):
"""Mixin class for Raw, Evoked, Epochs."""
def __contains__(self, ch_type):
"""Check channel type membership.
Parameters
----------
ch_type : str
Channel type to check for. Can be e.g. 'meg', 'eeg', 'stim', etc.
Returns
-------
in : bool
Whether or not the instance contains the given channel type.
Examples
--------
Channel type membership can be tested as::
>>> 'meg' in inst # doctest: +SKIP
True
>>> 'seeg' in inst # doctest: +SKIP
False
"""
if ch_type == 'meg':
has_ch_type = (_contains_ch_type(self.info, 'mag') or
_contains_ch_type(self.info, 'grad'))
else:
has_ch_type = _contains_ch_type(self.info, ch_type)
return has_ch_type
@property
def compensation_grade(self):
"""The current gradient compensation grade."""
return get_current_comp(self.info)
# XXX Eventually de-duplicate with _kind_dict of mne/io/meas_info.py
_human2fiff = {'ecg': FIFF.FIFFV_ECG_CH,
'eeg': FIFF.FIFFV_EEG_CH,
'emg': FIFF.FIFFV_EMG_CH,
'eog': FIFF.FIFFV_EOG_CH,
'exci': FIFF.FIFFV_EXCI_CH,
'ias': FIFF.FIFFV_IAS_CH,
'misc': FIFF.FIFFV_MISC_CH,
'resp': FIFF.FIFFV_RESP_CH,
'seeg': FIFF.FIFFV_SEEG_CH,
'stim': FIFF.FIFFV_STIM_CH,
'syst': FIFF.FIFFV_SYST_CH,
'bio': FIFF.FIFFV_BIO_CH,
'ecog': FIFF.FIFFV_ECOG_CH,
'hbo': FIFF.FIFFV_FNIRS_CH,
'hbr': FIFF.FIFFV_FNIRS_CH}
_human2unit = {'ecg': FIFF.FIFF_UNIT_V,
'eeg': FIFF.FIFF_UNIT_V,
'emg': FIFF.FIFF_UNIT_V,
'eog': FIFF.FIFF_UNIT_V,
'exci': FIFF.FIFF_UNIT_NONE,
'ias': FIFF.FIFF_UNIT_NONE,
'misc': FIFF.FIFF_UNIT_V,
'resp': FIFF.FIFF_UNIT_NONE,
'seeg': FIFF.FIFF_UNIT_V,
'stim': FIFF.FIFF_UNIT_NONE,
'syst': FIFF.FIFF_UNIT_NONE,
'bio': FIFF.FIFF_UNIT_V,
'ecog': FIFF.FIFF_UNIT_V,
'hbo': FIFF.FIFF_UNIT_MOL,
'hbr': FIFF.FIFF_UNIT_MOL}
_unit2human = {FIFF.FIFF_UNIT_V: 'V',
FIFF.FIFF_UNIT_T: 'T',
FIFF.FIFF_UNIT_T_M: 'T/m',
FIFF.FIFF_UNIT_MOL: 'M',
FIFF.FIFF_UNIT_NONE: 'NA'}
def _check_set(ch, projs, ch_type):
"""Ensure type change is compatible with projectors."""
new_kind = _human2fiff[ch_type]
if ch['kind'] != new_kind:
for proj in projs:
if ch['ch_name'] in proj['data']['col_names']:
raise RuntimeError('Cannot change channel type for channel %s '
'in projector "%s"'
% (ch['ch_name'], proj['desc']))
ch['kind'] = new_kind
class SetChannelsMixin(object):
"""Mixin class for Raw, Evoked, Epochs."""
@verbose
def set_eeg_reference(self, ref_channels='average', projection=False,
verbose=None):
"""Specify which reference to use for EEG data.
By default, MNE-Python will automatically re-reference the EEG signal
to use an average reference (see below). Use this function to
explicitly specify the desired reference for EEG. This can be either an
existing electrode or a new virtual channel. This function will
re-reference the data according to the desired reference and prevent
MNE-Python from automatically adding an average reference projection.
Some common referencing schemes and the corresponding value for the
``ref_channels`` parameter:
No re-referencing:
If the EEG data is already using the proper reference, set
``ref_channels=[]``. This will prevent MNE-Python from
automatically adding an average reference projection.
Average reference:
A new virtual reference electrode is created by averaging the
current EEG signal by setting ``ref_channels='average'``. Bad EEG
channels are automatically excluded if they are properly set in
``info['bads']``.
A single electrode:
Set ``ref_channels`` to a list containing the name of the channel
that will act as the new reference, for example
``ref_channels=['Cz']``.
The mean of multiple electrodes:
A new virtual reference electrode is created by computing the
average of the current EEG signal recorded from two or more
selected channels. Set ``ref_channels`` to a list of channel names,
indicating which channels to use. For example, to apply an average
mastoid reference, when using the 10-20 naming scheme, set
``ref_channels=['M1', 'M2']``.
Parameters
----------
ref_channels : list of str | str
The name(s) of the channel(s) used to construct the reference. To
apply an average reference, specify ``'average'`` here (default).
If an empty list is specified, the data is assumed to already have
a proper reference and MNE will not attempt any re-referencing of
the data. Defaults to an average reference.
projection : bool
If ``ref_channels='average'`` this argument specifies if the
average reference should be computed as a projection (True) or not
(False; default). If ``projection=True``, the average reference is
added as a projection and is not applied to the data (it can be
applied afterwards with the ``apply_proj`` method). If
``projection=False``, the average reference is directly applied to
the data. If ``ref_channels`` is not ``'average'``, ``projection``
must be set to ``False`` (the default in this case).
%(verbose_meth)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
Data with EEG channels re-referenced. If ``ref_channels='average'``
and ``projection=True`` a projection will be added instead of
directly re-referencing the data.
See Also
--------
mne.set_bipolar_reference : Convenience function for creating bipolar
references.
Notes
-----
1. If a reference is requested that is not the average reference, this
function removes any pre-existing average reference projections.
2. During source localization, the EEG signal should have an average
reference.
3. In order to apply a reference, the data must be preloaded. This is
not necessary if ``ref_channels='average'`` and ``projection=True``.
4. For an average reference, bad EEG channels are automatically
excluded if they are properly set in ``info['bads']``.
.. versionadded:: 0.9.0
"""
from ..io.reference import set_eeg_reference
return set_eeg_reference(self, ref_channels=ref_channels, copy=False,
projection=projection)[0]
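    # Usage sketch (hedged; 'M1'/'M2' are the mastoid channel names used in the
    # docstring example and must exist in the preloaded recording):
    #   raw.set_eeg_reference(ref_channels=['M1', 'M2'])
    #   raw.set_eeg_reference('average', projection=True)  # add an average-ref projection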
def _get_channel_positions(self, picks=None):
"""Get channel locations from info.
Parameters
----------
picks : str | list | slice | None
None gets good data indices.
Notes
-----
.. versionadded:: 0.9.0
"""
picks = _picks_to_idx(self.info, picks)
chs = self.info['chs']
pos = np.array([chs[k]['loc'][:3] for k in picks])
n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0)
if n_zero > 1: # XXX some systems have origin (0, 0, 0)
raise ValueError('Could not extract channel positions for '
'{} channels'.format(n_zero))
return pos
def _set_channel_positions(self, pos, names):
"""Update channel locations in info.
Parameters
----------
pos : array-like | np.ndarray, shape (n_points, 3)
The channel positions to be set.
names : list of str
The names of the channels to be set.
Notes
-----
.. versionadded:: 0.9.0
"""
if len(pos) != len(names):
raise ValueError('Number of channel positions not equal to '
'the number of names given.')
        pos = np.asarray(pos, dtype=float)
if pos.shape[-1] != 3 or pos.ndim != 2:
msg = ('Channel positions must have the shape (n_points, 3) '
'not %s.' % (pos.shape,))
raise ValueError(msg)
for name, p in zip(names, pos):
if name in self.ch_names:
idx = self.ch_names.index(name)
self.info['chs'][idx]['loc'][:3] = p
else:
msg = ('%s was not found in the info. Cannot be updated.'
% name)
raise ValueError(msg)
def set_channel_types(self, mapping):
"""Define the sensor type of channels.
Note: The following sensor types are accepted:
ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst, ecog,
hbo, hbr
Parameters
----------
mapping : dict
a dictionary mapping a channel to a sensor type (str)
{'EEG061': 'eog'}.
Notes
-----
.. versionadded:: 0.9.0
"""
ch_names = self.info['ch_names']
# first check and assemble clean mappings of index and name
unit_changes = dict()
for ch_name, ch_type in mapping.items():
if ch_name not in ch_names:
raise ValueError("This channel name (%s) doesn't exist in "
"info." % ch_name)
c_ind = ch_names.index(ch_name)
if ch_type not in _human2fiff:
raise ValueError('This function cannot change to this '
'channel type: %s. Accepted channel types '
'are %s.'
% (ch_type,
", ".join(sorted(_human2unit.keys()))))
# Set sensor type
_check_set(self.info['chs'][c_ind], self.info['projs'], ch_type)
unit_old = self.info['chs'][c_ind]['unit']
unit_new = _human2unit[ch_type]
if unit_old not in _unit2human:
raise ValueError("Channel '%s' has unknown unit (%s). Please "
"fix the measurement info of your data."
% (ch_name, unit_old))
if unit_old != _human2unit[ch_type]:
this_change = (_unit2human[unit_old], _unit2human[unit_new])
if this_change not in unit_changes:
unit_changes[this_change] = list()
unit_changes[this_change].append(ch_name)
self.info['chs'][c_ind]['unit'] = _human2unit[ch_type]
if ch_type in ['eeg', 'seeg', 'ecog']:
coil_type = FIFF.FIFFV_COIL_EEG
elif ch_type == 'hbo':
coil_type = FIFF.FIFFV_COIL_FNIRS_HBO
elif ch_type == 'hbr':
coil_type = FIFF.FIFFV_COIL_FNIRS_HBR
else:
coil_type = FIFF.FIFFV_COIL_NONE
self.info['chs'][c_ind]['coil_type'] = coil_type
msg = "The unit for channel(s) {0} has changed from {1} to {2}."
for this_change, names in unit_changes.items():
warn(msg.format(", ".join(sorted(names)), *this_change))
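    # Usage sketch (uses the docstring example: a channel named 'EEG061' that
    # actually records EOG):
    #   raw.set_channel_types({'EEG061': 'eog'})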
def rename_channels(self, mapping):
"""Rename channels.
Parameters
----------
mapping : dict | callable
a dictionary mapping the old channel to a new channel name
e.g. {'EEG061' : 'EEG161'}. Can also be a callable function
that takes and returns a string (new in version 0.10.0).
Notes
-----
.. versionadded:: 0.9.0
"""
rename_channels(self.info, mapping)
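    # Usage sketch (mapping taken from the docstring example; the callable form is
    # a hypothetical illustration):
    #   raw.rename_channels({'EEG061': 'EEG161'})
    #   raw.rename_channels(lambda name: name.replace(' ', ''))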
@verbose
def set_montage(self, montage, set_dig=True, verbose=None):
"""Set EEG sensor configuration and head digitization.
Parameters
----------
montage : instance of Montage | instance of DigMontage | str | None
The montage to use (None removes any location information).
set_dig : bool
If True, update the digitization information (``info['dig']``)
in addition to the channel positions (``info['chs'][idx]['loc']``).
            .. versionadded:: 0.15
%(verbose_meth)s
Notes
-----
Operates in place.
.. versionadded:: 0.9.0
"""
from .montage import _set_montage
_set_montage(self.info, montage, set_dig=set_dig)
return self
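    # Usage sketch ('standard_1020' is an assumed template name; a Montage or
    # DigMontage instance, or None, is also accepted as documented above):
    #   raw.set_montage('standard_1020')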
def plot_sensors(self, kind='topomap', ch_type=None, title=None,
show_names=False, ch_groups=None, to_sphere=True,
axes=None, block=False, show=True):
"""Plot sensor positions.
Parameters
----------
kind : str
Whether to plot the sensors as 3d, topomap or as an interactive
sensor selection dialog. Available options 'topomap', '3d',
'select'. If 'select', a set of channels can be selected
interactively by using lasso selector or clicking while holding
control key. The selected channels are returned along with the
figure instance. Defaults to 'topomap'.
ch_type : None | str
The channel type to plot. Available options 'mag', 'grad', 'eeg',
'seeg', 'ecog', 'all'. If ``'all'``, all the available mag, grad,
eeg, seeg and ecog channels are plotted. If None (default), then
channels are chosen in the order given above.
title : str | None
Title for the figure. If None (default), equals to ``'Sensor
positions (%s)' % ch_type``.
show_names : bool | array of str
Whether to display all channel names. If an array, only the channel
names in the array are shown. Defaults to False.
ch_groups : 'position' | array of shape (n_ch_groups, n_picks) | None
Channel groups for coloring the sensors. If None (default), default
coloring scheme is used. If 'position', the sensors are divided
into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If
array, the channels are divided by picks given in the array.
.. versionadded:: 0.13.0
to_sphere : bool
Whether to project the 3d locations to a sphere. When False, the
sensor array appears similar as to looking downwards straight above
the subject's head. Has no effect when kind='3d'. Defaults to True.
.. versionadded:: 0.14.0
axes : instance of Axes | instance of Axes3D | None
Axes to draw the sensors to. If ``kind='3d'``, axes must be an
instance of Axes3D. If None (default), a new axes will be created.
.. versionadded:: 0.13.0
block : bool
Whether to halt program execution until the figure is closed.
Defaults to False.
.. versionadded:: 0.13.0
show : bool
Show figure if True. Defaults to True.
Returns
-------
fig : instance of Figure
Figure containing the sensor topography.
selection : list
A list of selected channels. Only returned if ``kind=='select'``.
See Also
--------
mne.viz.plot_layout
Notes
-----
This function plots the sensor locations from the info structure using
matplotlib. For drawing the sensors using mayavi see
:func:`mne.viz.plot_alignment`.
.. versionadded:: 0.12.0
"""
from ..viz.utils import plot_sensors
return plot_sensors(self.info, kind=kind, ch_type=ch_type, title=title,
show_names=show_names, ch_groups=ch_groups,
to_sphere=to_sphere, axes=axes, block=block,
show=show)
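    # Usage sketch: interactive lasso selection over a 2D topomap. Because
    # kind='select', the picked channel names are returned alongside the figure:
    #   fig, selection = raw.plot_sensors(kind='select', ch_type='eeg', block=True)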
@copy_function_doc_to_method_doc(anonymize_info)
def anonymize(self):
"""
.. versionadded:: 0.13.0
"""
anonymize_info(self.info)
if hasattr(self, 'annotations'):
# XXX : anonymize should rather subtract a random date
# rather than setting it to None
self.annotations.orig_time = None
self.annotations.onset -= self._first_time
return self
class UpdateChannelsMixin(object):
"""Mixin class for Raw, Evoked, Epochs, AverageTFR."""
@verbose
def pick_types(self, meg=True, eeg=False, stim=False, eog=False,
ecg=False, emg=False, ref_meg='auto', misc=False,
resp=False, chpi=False, exci=False, ias=False, syst=False,
seeg=False, dipole=False, gof=False, bio=False, ecog=False,
fnirs=False, include=(), exclude='bads', selection=None,
verbose=None):
"""Pick some channels by type and names.
Parameters
----------
meg : bool | str
            If True include all MEG channels. If False include no MEG channels.
If string it can be 'mag', 'grad', 'planar1' or 'planar2' to select
only magnetometers, all gradiometers, or a specific type of
gradiometer.
eeg : bool
If True include EEG channels.
stim : bool
If True include stimulus channels.
eog : bool
If True include EOG channels.
ecg : bool
If True include ECG channels.
emg : bool
If True include EMG channels.
ref_meg: bool | str
If True include CTF / 4D reference channels. If 'auto', the
reference channels are only included if compensations are present.
misc : bool
If True include miscellaneous analog channels.
resp : bool
If True include response-trigger channel. For some MEG systems this
is separate from the stim channel.
chpi : bool
If True include continuous HPI coil channels.
exci : bool
Flux excitation channel used to be a stimulus channel.
ias : bool
Internal Active Shielding data (maybe on Triux only).
syst : bool
System status channel information (on Triux systems only).
seeg : bool
Stereotactic EEG channels.
dipole : bool
Dipole time course channels.
gof : bool
Dipole goodness of fit channels.
bio : bool
Bio channels.
ecog : bool
Electrocorticography channels.
fnirs : bool | str
Functional near-infrared spectroscopy channels. If True include all
fNIRS channels. If False (default) include none. If string it can
be 'hbo' (to include channels measuring oxyhemoglobin) or 'hbr' (to
include channels measuring deoxyhemoglobin).
include : list of string
List of additional channels to include. If empty do not include
any.
exclude : list of string | str
List of channels to exclude. If 'bads' (default), exclude channels
in ``info['bads']``.
selection : list of string
Restrict sensor channels (MEG, EEG) to this list of channel names.
%(verbose_meth)s
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
pick_channels
Notes
-----
.. versionadded:: 0.9.0
"""
idx = pick_types(
self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg,
ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci,
ias=ias, syst=syst, seeg=seeg, dipole=dipole, gof=gof, bio=bio,
ecog=ecog, fnirs=fnirs, include=include, exclude=exclude,
selection=selection)
return self._pick_drop_channels(idx)
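    # Usage sketch: keep only magnetometers and EOG, excluding channels marked bad:
    #   raw.pick_types(meg='mag', eog=True, exclude='bads')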
def pick_channels(self, ch_names):
"""Pick some channels.
Parameters
----------
ch_names : list
The list of channels to select.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
pick_types
reorder_channels
Notes
-----
The channel names given are assumed to be a set, i.e. the order
does not matter. The original order of the channels is preserved.
You can use ``reorder_channels`` to set channel order if necessary.
.. versionadded:: 0.9.0
"""
return self._pick_drop_channels(
pick_channels(self.info['ch_names'], ch_names))
@fill_doc
def pick(self, picks, exclude=()):
"""Pick a subset of channels.
Parameters
----------
%(picks_all)s
exclude : list | str
Set of channels to exclude, only used when picking based on
types (e.g., exclude="bads" when picks="meg").
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
"""
picks = _picks_to_idx(self.info, picks, 'all', exclude,
allow_empty=False)
return self._pick_drop_channels(picks)
def reorder_channels(self, ch_names):
"""Reorder channels.
Parameters
----------
ch_names : list
The desired channel order.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
pick_types
pick_channels
Notes
-----
Channel names must be unique. Channels that are not in ``ch_names``
are dropped.
.. versionadded:: 0.16.0
"""
_check_excludes_includes(ch_names)
idx = list()
for ch_name in ch_names:
ii = self.ch_names.index(ch_name)
if ii in idx:
raise ValueError('Channel name repeated: %s' % (ch_name,))
idx.append(ii)
return self._pick_drop_channels(idx)
def drop_channels(self, ch_names):
"""Drop channel(s).
Parameters
----------
ch_names : iterable or str
Iterable (e.g. list) of channel name(s) or channel name to remove.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
reorder_channels
pick_channels
pick_types
Notes
-----
.. versionadded:: 0.9.0
"""
if isinstance(ch_names, str):
ch_names = [ch_names]
try:
all_str = all([isinstance(ch, str) for ch in ch_names])
except TypeError:
raise ValueError("'ch_names' must be iterable, got "
"type {} ({}).".format(type(ch_names), ch_names))
if not all_str:
raise ValueError("Each element in 'ch_names' must be str, got "
"{}.".format([type(ch) for ch in ch_names]))
missing = [ch for ch in ch_names if ch not in self.ch_names]
if len(missing) > 0:
msg = "Channel(s) {0} not found, nothing dropped."
raise ValueError(msg.format(", ".join(missing)))
bad_idx = [self.ch_names.index(ch) for ch in ch_names
if ch in self.ch_names]
idx = np.setdiff1d(np.arange(len(self.ch_names)), bad_idx)
return self._pick_drop_channels(idx)
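    # Usage sketch (channel names are illustrative assumptions):
    #   raw.drop_channels(['EOG 061'])                   # a single str also works
    #   raw.reorder_channels(['MEG 0111', 'MEG 0121'])   # keeps only these, in order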
def _pick_drop_channels(self, idx):
# avoid circular imports
from ..time_frequency import AverageTFR, EpochsTFR
_check_preload(self, 'adding, dropping, or reordering channels')
if getattr(self, 'picks', None) is not None:
self.picks = self.picks[idx]
if hasattr(self, '_cals'):
self._cals = self._cals[idx]
pick_info(self.info, idx, copy=False)
if getattr(self, '_projector', None) is not None:
self._projector = self._projector[idx][:, idx]
# All others (Evoked, Epochs, Raw) have chs axis=-2
axis = -3 if isinstance(self, (AverageTFR, EpochsTFR)) else -2
self._data = self._data.take(idx, axis=axis)
return self
def add_channels(self, add_list, force_update_info=False):
"""Append new channels to the instance.
Parameters
----------
add_list : list
            A list of objects to append to self. They must all be of the same
            type as the current object.
force_update_info : bool
If True, force the info for objects to be appended to match the
values in `self`. This should generally only be used when adding
stim channels for which important metadata won't be overwritten.
.. versionadded:: 0.12
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
Notes
-----
If ``self`` is a Raw instance that has been preloaded into a
:obj:`numpy.memmap` instance, the memmap will be resized.
"""
# avoid circular imports
from ..io import BaseRaw, _merge_info
from ..epochs import BaseEpochs
_validate_type(add_list, (list, tuple), 'Input')
# Object-specific checks
for inst in add_list + [self]:
_check_preload(inst, "adding channels")
if isinstance(self, BaseRaw):
con_axis = 0
comp_class = BaseRaw
elif isinstance(self, BaseEpochs):
con_axis = 1
comp_class = BaseEpochs
else:
con_axis = 0
comp_class = type(self)
for inst in add_list:
_validate_type(inst, comp_class, 'All input')
data = [inst._data for inst in [self] + add_list]
# Make sure that all dimensions other than channel axis are the same
compare_axes = [i for i in range(data[0].ndim) if i != con_axis]
shapes = np.array([dat.shape for dat in data])[:, compare_axes]
for shape in shapes:
if not ((shapes[0] - shape) == 0).all():
raise AssertionError('All data dimensions except channels '
'must match, got %s != %s'
% (shapes[0], shape))
del shapes
# Create final data / info objects
infos = [self.info] + [inst.info for inst in add_list]
new_info = _merge_info(infos, force_update_to_first=force_update_info)
# Now update the attributes
if isinstance(self._data, np.memmap) and con_axis == 0 and \
sys.platform != 'darwin': # resizing not available--no mremap
# Use a resize and fill in other ones
out_shape = (sum(d.shape[0] for d in data),) + data[0].shape[1:]
n_bytes = np.prod(out_shape) * self._data.dtype.itemsize
self._data.flush()
self._data.base.resize(n_bytes)
self._data = np.memmap(self._data.filename, mode='r+',
dtype=self._data.dtype, shape=out_shape)
assert self._data.shape == out_shape
assert self._data.nbytes == n_bytes
offset = len(data[0])
for d in data[1:]:
this_len = len(d)
self._data[offset:offset + this_len] = d
offset += this_len
else:
self._data = np.concatenate(data, axis=con_axis)
self.info = new_info
if isinstance(self, BaseRaw):
self._cals = np.concatenate([getattr(inst, '_cals')
for inst in [self] + add_list])
return self
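    # Usage sketch (``stim_raw`` is a hypothetical Raw object carrying extra stim
    # channels with matching times; both objects must be preloaded):
    #   raw.add_channels([stim_raw], force_update_info=True)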
class InterpolationMixin(object):
"""Mixin class for Raw, Evoked, Epochs."""
@verbose
def interpolate_bads(self, reset_bads=True, mode='accurate',
origin=(0., 0., 0.04), verbose=None):
"""Interpolate bad MEG and EEG channels.
Operates in place.
Parameters
----------
reset_bads : bool
If True, remove the bads from info.
mode : str
Either ``'accurate'`` or ``'fast'``, determines the quality of the
Legendre polynomial expansion used for interpolation of MEG
channels.
origin : array-like, shape (3,) | str
Origin of the sphere in the head coordinate frame and in meters.
Can be ``'auto'``, which means a head-digitization-based origin
fit. Default is ``(0., 0., 0.04)``.
.. versionadded:: 0.17
%(verbose_meth)s
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
Notes
-----
.. versionadded:: 0.9.0
"""
from .interpolation import _interpolate_bads_eeg, _interpolate_bads_meg
_check_preload(self, "interpolation")
if len(self.info['bads']) == 0:
warn('No bad channels to interpolate. Doing nothing...')
return self
_interpolate_bads_eeg(self)
_interpolate_bads_meg(self, mode=mode, origin=origin)
if reset_bads is True:
self.info['bads'] = []
return self
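    # Usage sketch: interpolate whatever is currently listed in info['bads']
    # (the bad-channel name here is a hypothetical example):
    #   raw.info['bads'] = ['MEG 0111']
    #   raw.interpolate_bads(reset_bads=True, mode='accurate')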
def rename_channels(info, mapping):
"""Rename channels.
.. warning:: The channel names must have at most 15 characters
Parameters
----------
info : dict
Measurement info.
mapping : dict | callable
a dictionary mapping the old channel to a new channel name
e.g. {'EEG061' : 'EEG161'}. Can also be a callable function
that takes and returns a string (new in version 0.10.0).
"""
info._check_consistency()
bads = list(info['bads']) # make our own local copies
ch_names = list(info['ch_names'])
# first check and assemble clean mappings of index and name
if isinstance(mapping, dict):
orig_names = sorted(list(mapping.keys()))
missing = [orig_name not in ch_names for orig_name in orig_names]
if any(missing):
raise ValueError("Channel name(s) in mapping missing from info: "
"%s" % np.array(orig_names)[np.array(missing)])
new_names = [(ch_names.index(ch_name), new_name)
for ch_name, new_name in mapping.items()]
elif callable(mapping):
new_names = [(ci, mapping(ch_name))
for ci, ch_name in enumerate(ch_names)]
else:
raise ValueError('mapping must be callable or dict, not %s'
% (type(mapping),))
# check we got all strings out of the mapping
for new_name in new_names:
_validate_type(new_name[1], 'str', 'New channel mappings')
bad_new_names = [name for _, name in new_names if len(name) > 15]
if len(bad_new_names):
raise ValueError('Channel names cannot be longer than 15 '
'characters. These channel names are not '
'valid : %s' % new_names)
# do the remapping locally
for c_ind, new_name in new_names:
for bi, bad in enumerate(bads):
if bad == ch_names[c_ind]:
bads[bi] = new_name
ch_names[c_ind] = new_name
# check that all the channel names are unique
if len(ch_names) != len(np.unique(ch_names)):
raise ValueError('New channel names are not unique, renaming failed')
# do the remapping in info
info['bads'] = bads
for ch, ch_name in zip(info['chs'], ch_names):
ch['ch_name'] = ch_name
info._update_redundant()
info._check_consistency()
def _recursive_flatten(cell, dtype):
"""Unpack mat files in Python."""
if len(cell) > 0:
while not isinstance(cell[0], dtype):
cell = [c for d in cell for c in d]
return cell
@fill_doc
def read_ch_connectivity(fname, picks=None):
"""Parse FieldTrip neighbors .mat file.
More information on these neighbor definitions can be found on the related
`FieldTrip documentation pages
<http://www.fieldtrip.org/template/neighbours>`__.
Parameters
----------
fname : str
The file name. Example: 'neuromag306mag', 'neuromag306planar',
'ctf275', 'biosemi64', etc.
%(picks_all)s
        Picks must match the template.
Returns
-------
ch_connectivity : scipy.sparse.csr_matrix, shape (n_channels, n_channels)
The connectivity matrix.
ch_names : list
The list of channel names present in connectivity matrix.
See Also
--------
find_ch_connectivity
Notes
-----
This function is closely related to :func:`find_ch_connectivity`. If you
don't know the correct file for the neighbor definitions,
:func:`find_ch_connectivity` can compute the connectivity matrix from 2d
sensor locations.
"""
from scipy.io import loadmat
if not op.isabs(fname):
templates_dir = op.realpath(op.join(op.dirname(__file__),
'data', 'neighbors'))
templates = os.listdir(templates_dir)
for f in templates:
if f == fname:
break
if f == fname + '_neighb.mat':
fname += '_neighb.mat'
break
else:
raise ValueError('I do not know about this neighbor '
'template: "{}"'.format(fname))
fname = op.join(templates_dir, fname)
nb = loadmat(fname)['neighbours']
ch_names = _recursive_flatten(nb['label'], str)
picks = _picks_to_idx(len(ch_names), picks)
neighbors = [_recursive_flatten(c, str) for c in
nb['neighblabel'].flatten()]
assert len(ch_names) == len(neighbors)
connectivity = _ch_neighbor_connectivity(ch_names, neighbors)
# picking before constructing matrix is buggy
connectivity = connectivity[picks][:, picks]
ch_names = [ch_names[p] for p in picks]
return connectivity, ch_names
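# Usage sketch (template name taken from the docstring examples above):
#   connectivity, ch_names = read_ch_connectivity('neuromag306mag')
#   # ``connectivity`` is a scipy.sparse CSR matrix, shape (n_channels, n_channels)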
def _ch_neighbor_connectivity(ch_names, neighbors):
"""Compute sensor connectivity matrix.
Parameters
----------
ch_names : list of str
The channel names.
neighbors : list of list
        A list of lists of channel names: the neighbors to
        which each channel in ch_names is connected.
Must be of the same length as ch_names.
Returns
-------
ch_connectivity : scipy.sparse matrix
The connectivity matrix.
"""
if len(ch_names) != len(neighbors):
raise ValueError('`ch_names` and `neighbors` must '
'have the same length')
set_neighbors = {c for d in neighbors for c in d}
rest = set_neighbors - set(ch_names)
if len(rest) > 0:
raise ValueError('Some of your neighbors are not present in the '
'list of channel names')
for neigh in neighbors:
        if (not isinstance(neigh, list) or
                not all(isinstance(c, str) for c in neigh)):
raise ValueError('`neighbors` must be a list of lists of str')
ch_connectivity = np.eye(len(ch_names), dtype=bool)
for ii, neigbs in enumerate(neighbors):
ch_connectivity[ii, [ch_names.index(i) for i in neigbs]] = True
ch_connectivity = sparse.csr_matrix(ch_connectivity)
return ch_connectivity
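# Minimal illustration of the helper above (channel names are made up): calling
#   _ch_neighbor_connectivity(['A', 'B'], [['B'], ['A']]).toarray()
# yields a 2x2 matrix that is True everywhere, since each channel is connected
# to itself (the identity diagonal) and to its listed neighbor.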
def find_ch_connectivity(info, ch_type):
"""Find the connectivity matrix for the given channels.
This function tries to infer the appropriate connectivity matrix template
for the given channels. If a template is not found, the connectivity matrix
is computed using Delaunay triangulation based on 2d sensor locations.
Parameters
----------
info : instance of Info
The measurement info.
ch_type : str | None
The channel type for computing the connectivity matrix. Currently
supports 'mag', 'grad', 'eeg' and None. If None, the info must contain
only one channel type.
Returns
-------
ch_connectivity : scipy.sparse.csr_matrix, shape (n_channels, n_channels)
The connectivity matrix.
ch_names : list
The list of channel names present in connectivity matrix.
See Also
--------
read_ch_connectivity
Notes
-----
.. versionadded:: 0.15
Automatic detection of an appropriate connectivity matrix template only
works for MEG data at the moment. This means that the connectivity matrix
is always computed for EEG data and never loaded from a template file. If
you want to load a template for a given montage use
:func:`read_ch_connectivity` directly.
"""
if ch_type is None:
picks = channel_indices_by_type(info)
if sum([len(p) != 0 for p in picks.values()]) != 1:
raise ValueError('info must contain only one channel type if '
'ch_type is None.')
ch_type = channel_type(info, 0)
else:
_check_option('ch_type', ch_type, ['mag', 'grad', 'eeg'])
(has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,
has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,
has_eeg_coils_and_meg, has_eeg_coils_only,
has_neuromag_122_grad) = _get_ch_info(info)
conn_name = None
if has_vv_mag and ch_type == 'mag':
conn_name = 'neuromag306mag'
elif has_vv_grad and ch_type == 'grad':
conn_name = 'neuromag306planar'
elif has_neuromag_122_grad:
conn_name = 'neuromag122'
elif has_4D_mag:
if 'MEG 248' in info['ch_names']:
idx = info['ch_names'].index('MEG 248')
grad = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_GRAD
mag = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG
if ch_type == 'grad' and grad:
conn_name = 'bti248grad'
elif ch_type == 'mag' and mag:
conn_name = 'bti248'
elif 'MEG 148' in info['ch_names'] and ch_type == 'mag':
idx = info['ch_names'].index('MEG 148')
if info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG:
conn_name = 'bti148'
elif has_CTF_grad and ch_type == 'mag':
if info['nchan'] < 100:
conn_name = 'ctf64'
elif info['nchan'] > 200:
conn_name = 'ctf275'
else:
conn_name = 'ctf151'
if conn_name is not None:
logger.info('Reading connectivity matrix for %s.' % conn_name)
return read_ch_connectivity(conn_name)
logger.info('Could not find a connectivity matrix for the data. '
'Computing connectivity based on Delaunay triangulations.')
return _compute_ch_connectivity(info, ch_type)
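# Usage sketch (assumes ``raw`` holds gradiometer data):
#   connectivity, ch_names = find_ch_connectivity(raw.info, ch_type='grad')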
def _compute_ch_connectivity(info, ch_type):
"""Compute channel connectivity matrix using Delaunay triangulations.
Parameters
----------
    info : instance of Info
The measurement info.
ch_type : str
The channel type for computing the connectivity matrix. Currently
supports 'mag', 'grad' and 'eeg'.
Returns
-------
ch_connectivity : scipy.sparse matrix, shape (n_channels, n_channels)
The connectivity matrix.
ch_names : list
The list of channel names present in connectivity matrix.
"""
from scipy.spatial import Delaunay
from .. import spatial_tris_connectivity
from ..channels.layout import _auto_topomap_coords, _pair_grad_sensors
combine_grads = (ch_type == 'grad' and FIFF.FIFFV_COIL_VV_PLANAR_T1 in
np.unique([ch['coil_type'] for ch in info['chs']]))
picks = dict(_picks_by_type(info, exclude=[]))[ch_type]
ch_names = [info['ch_names'][pick] for pick in picks]
if combine_grads:
pairs = _pair_grad_sensors(info, topomap_coords=False, exclude=[])
if len(pairs) != len(picks):
raise RuntimeError('Cannot find a pair for some of the '
'gradiometers. Cannot compute connectivity '
'matrix.')
xy = _auto_topomap_coords(info, picks[::2]) # only for one of the pair
else:
xy = _auto_topomap_coords(info, picks)
tri = Delaunay(xy)
neighbors = spatial_tris_connectivity(tri.simplices)
if combine_grads:
ch_connectivity = np.eye(len(picks), dtype=bool)
for idx, neigbs in zip(neighbors.row, neighbors.col):
for ii in range(2): # make sure each pair is included
for jj in range(2):
ch_connectivity[idx * 2 + ii, neigbs * 2 + jj] = True
ch_connectivity[idx * 2 + ii, idx * 2 + jj] = True # pair
ch_connectivity = sparse.csr_matrix(ch_connectivity)
else:
ch_connectivity = sparse.lil_matrix(neighbors)
ch_connectivity.setdiag(np.repeat(1, ch_connectivity.shape[0]))
ch_connectivity = ch_connectivity.tocsr()
return ch_connectivity, ch_names
def fix_mag_coil_types(info):
"""Fix magnetometer coil types.
Parameters
----------
info : dict
The info dict to correct. Corrections are done in-place.
Notes
-----
This function changes magnetometer coil types 3022 (T1: SQ20483N) and
3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition
records in the info structure.
Neuromag Vectorview systems can contain magnetometers with two
different coil sizes (3022 and 3023 vs. 3024). The systems
incorporating coils of type 3024 were introduced last and are used at
the majority of MEG sites. At some sites with 3024 magnetometers,
the data files have still defined the magnetometers to be of type
3022 to ensure compatibility with older versions of Neuromag software.
In the MNE software as well as in the present version of Neuromag
software coil type 3024 is fully supported. Therefore, it is now safe
to upgrade the data files to use the true coil type.
.. note:: The effect of the difference between the coil sizes on the
current estimates computed by the MNE software is very small.
Therefore the use of mne_fix_mag_coil_types is not mandatory.
"""
old_mag_inds = _get_T1T2_mag_inds(info)
for ii in old_mag_inds:
info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T3
logger.info('%d of %d T1/T2 magnetometer types replaced with T3.' %
(len(old_mag_inds), len(pick_types(info, meg='mag'))))
info._check_consistency()
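# Usage sketch: upgrade T1/T2 magnetometer coil definitions in place:
#   fix_mag_coil_types(raw.info)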
def _get_T1T2_mag_inds(info):
"""Find T1/T2 magnetometer coil types."""
picks = pick_types(info, meg='mag')
old_mag_inds = []
for ii in picks:
ch = info['chs'][ii]
if ch['coil_type'] in (FIFF.FIFFV_COIL_VV_MAG_T1,
FIFF.FIFFV_COIL_VV_MAG_T2):
old_mag_inds.append(ii)
return old_mag_inds
def _get_ch_info(info):
"""Get channel info for inferring acquisition device."""
chs = info['chs']
# Only take first 16 bits, as higher bits store CTF comp order
coil_types = {ch['coil_type'] & 0xFFFF for ch in chs}
channel_types = {ch['kind'] for ch in chs}
has_vv_mag = any(k in coil_types for k in
[FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2,
FIFF.FIFFV_COIL_VV_MAG_T3])
has_vv_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1,
FIFF.FIFFV_COIL_VV_PLANAR_T2,
FIFF.FIFFV_COIL_VV_PLANAR_T3])
has_neuromag_122_grad = any(k in coil_types
for k in [FIFF.FIFFV_COIL_NM_122])
is_old_vv = ' ' in chs[0]['ch_name']
has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types
ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG,
FIFF.FIFFV_COIL_CTF_REF_GRAD,
FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD)
has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or
(FIFF.FIFFV_MEG_CH in channel_types and
any(k in ctf_other_types for k in coil_types)))
# hack due to MNE-C bug in IO of CTF
# only take first 16 bits, as higher bits store CTF comp order
n_kit_grads = sum(ch['coil_type'] & 0xFFFF == FIFF.FIFFV_COIL_KIT_GRAD
for ch in chs)
has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad,
n_kit_grads])
has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and
FIFF.FIFFV_EEG_CH in channel_types)
has_eeg_coils_and_meg = has_eeg_coils and has_any_meg
has_eeg_coils_only = has_eeg_coils and not has_any_meg
return (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,
has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,
has_eeg_coils_and_meg, has_eeg_coils_only, has_neuromag_122_grad)
def make_1020_channel_selections(info, midline="z"):
"""Return dict mapping from ROI names to lists of picks for 10/20 setups.
This passes through all channel names, and uses a simple heuristic to
separate channel names into three Region of Interest-based selections:
Left, Midline and Right. The heuristic is that channels ending on any of
the characters in `midline` are filed under that heading, otherwise those
ending in odd numbers under "Left", those in even numbers under "Right".
Other channels are ignored. This is appropriate for 10/20 files, but not
for other channel naming conventions.
If an info object is provided, lists are sorted from posterior to anterior.
Parameters
----------
info : instance of Info
Where to obtain the channel names from. The picks will
be in relation to the position in `info["ch_names"]`. If possible, this
lists will be sorted by y value position of the channel locations,
i.e., from back to front.
midline : str
Names ending in any of these characters are stored under the `Midline`
key. Defaults to 'z'. Note that capitalization is ignored.
Returns
-------
selections : dict
A dictionary mapping from ROI names to lists of picks (integers).
"""
_validate_type(info, "info")
try:
from .layout import find_layout
layout = find_layout(info)
pos = layout.pos
ch_names = layout.names
except RuntimeError: # no channel positions found
ch_names = info["ch_names"]
pos = None
selections = dict(Left=[], Midline=[], Right=[])
for pick, channel in enumerate(ch_names):
last_char = channel[-1].lower() # in 10/20, last char codes hemisphere
if last_char in midline:
selection = "Midline"
elif last_char.isdigit():
selection = "Left" if int(last_char) % 2 else "Right"
else: # ignore the channel
continue
selections[selection].append(pick)
if pos is not None:
        # sort channels from back to front
        # (ascending y-coordinate of the position info in the layout)
selections = {selection: np.array(picks)[pos[picks, 1].argsort()]
for selection, picks in selections.items()}
return selections
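# Usage sketch (assumes 10/20-style EEG names such as 'Fz', 'C3', 'C4'):
#   selections = make_1020_channel_selections(epochs.info, midline='z')
#   selections['Left']   # picks for channels whose names end in an odd digit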
| bsd-3-clause |
mfjb/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First, clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters at the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
            plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
                        cmap=plt.cm.nipy_spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
bikong2/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
    p = clf.decision_function([[x1, x2]])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
wdurhamh/statsmodels | examples/python/tsa_dates.py | 29 | 1169 |
## Dates in timeseries models
from __future__ import print_function
import statsmodels.api as sm
import pandas as pd
# ## Getting started
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
dates = sm.tsa.datetools.dates_from_range('1700', length=len(data.endog))
# ## Using Pandas
#
# Make a pandas Series or DataFrame
endog = pd.Series(data.endog, index=dates)
# Instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print(pred)
# ## Using explicit dates
ar_model = sm.tsa.AR(data.endog, dates=dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print(pred)
# This just returns a regular array, but since the model has date information attached, you can get the prediction dates in a roundabout way.
print(ar_res.data.predict_dates)
# Note: This attribute only exists if predict has been called. It holds the dates associated with the last call to predict.
| bsd-3-clause |
plissonf/scikit-learn | examples/model_selection/grid_search_digits.py | 227 | 2665 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This examples shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
anntzer/scikit-learn | examples/gaussian_process/plot_gpc_xor.py | 43 | 2170 | """
========================================================================
Illustration of Gaussian process classification (GPC) on the XOR dataset
========================================================================
This example illustrates GPC on XOR data. Compared are a stationary, isotropic
kernel (RBF) and a non-stationary kernel (DotProduct). On this particular
dataset, the DotProduct kernel obtains considerably better results because the
class-boundaries are linear and coincide with the coordinate axes. In general,
stationary kernels often obtain better results.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, DotProduct
xx, yy = np.meshgrid(np.linspace(-3, 3, 50),
np.linspace(-3, 3, 50))
rng = np.random.RandomState(0)
X = rng.randn(200, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
plt.figure(figsize=(10, 5))
kernels = [1.0 * RBF(length_scale=1.0), 1.0 * DotProduct(sigma_0=1.0)**2]
for i, kernel in enumerate(kernels):
clf = GaussianProcessClassifier(kernel=kernel, warm_start=True).fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1]
Z = Z.reshape(xx.shape)
plt.subplot(1, 2, i + 1)
image = plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
aspect='auto', origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0.5], linewidths=2,
colors=['k'])
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired,
edgecolors=(0, 0, 0))
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.colorbar(image)
plt.title("%s\n Log-Marginal-Likelihood:%.3f"
% (clf.kernel_, clf.log_marginal_likelihood(clf.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
| bsd-3-clause |
billy-inn/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 244 | 9986 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
    # Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
| bsd-3-clause |
ronfung/incubator-airflow | airflow/contrib/hooks/bigquery_hook.py | 6 | 39980 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains a BigQuery Hook, as well as a very basic PEP 249
implementation for BigQuery.
"""
import logging
import time
from apiclient.discovery import build, HttpError
from googleapiclient import errors
from builtins import range
from pandas_gbq.gbq import GbqConnector, \
_parse_data as gbq_parse_data, \
_check_google_client_version as gbq_check_google_client_version, \
_test_google_api_imports as gbq_test_google_api_imports
from pandas.tools.merge import concat
from past.builtins import basestring
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.hooks.dbapi_hook import DbApiHook
logging.getLogger("bigquery").setLevel(logging.INFO)
class BigQueryHook(GoogleCloudBaseHook, DbApiHook):
"""
Interact with BigQuery. This hook uses the Google Cloud Platform
connection.
"""
conn_name_attr = 'bigquery_conn_id'
def __init__(self,
bigquery_conn_id='bigquery_default',
delegate_to=None):
super(BigQueryHook, self).__init__(
conn_id=bigquery_conn_id,
delegate_to=delegate_to)
def get_conn(self):
"""
Returns a BigQuery PEP 249 connection object.
"""
service = self.get_service()
project = self._get_field('project')
return BigQueryConnection(service=service, project_id=project)
def get_service(self):
"""
Returns a BigQuery service object.
"""
http_authorized = self._authorize()
return build('bigquery', 'v2', http=http_authorized)
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
Insertion is currently unsupported. Theoretically, you could use
BigQuery's streaming API to insert rows into a table, but this hasn't
been implemented.
"""
raise NotImplementedError()
def get_pandas_df(self, bql, parameters=None, dialect='legacy'):
"""
Returns a Pandas DataFrame for the results produced by a BigQuery
query. The DbApiHook method must be overridden because Pandas
doesn't support PEP 249 connections, except for SQLite. See:
https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
https://github.com/pydata/pandas/issues/6900
:param bql: The BigQuery SQL to execute.
:type bql: string
:param parameters: The parameters to render the SQL query with (not used, leave to override superclass method)
:type parameters: mapping or iterable
:param dialect: Dialect of BigQuery SQL – legacy SQL or standard SQL
:type dialect: string in {'legacy', 'standard'}, default 'legacy'
"""
service = self.get_service()
project = self._get_field('project')
connector = BigQueryPandasConnector(project, service, dialect=dialect)
schema, pages = connector.run_query(bql)
dataframe_list = []
while len(pages) > 0:
page = pages.pop()
dataframe_list.append(gbq_parse_data(schema, page))
if len(dataframe_list) > 0:
return concat(dataframe_list, ignore_index=True)
else:
return gbq_parse_data(schema, [])
def table_exists(self, project_id, dataset_id, table_id):
"""
Checks for the existence of a table in Google BigQuery.
:param project_id: The Google cloud project in which to look for the table. The connection supplied to the hook
must provide access to the specified project.
:type project_id: string
:param dataset_id: The name of the dataset in which to look for the table.
:type dataset_id: string
:param table_id: The name of the table to check the existence of.
:type table_id: string
"""
service = self.get_service()
try:
service.tables().get(
projectId=project_id,
datasetId=dataset_id,
tableId=table_id
).execute()
return True
except errors.HttpError as e:
if e.resp['status'] == '404':
return False
raise
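# Illustrative usage sketch, not part of the original hook. The connection id,
# project, dataset, and table names below are placeholders; adapt them to your
# own environment before running.
def _example_bigquery_hook_usage():
    """Sketch: fetch query results as a DataFrame and check that a table exists."""
    hook = BigQueryHook(bigquery_conn_id='bigquery_default')
    # get_pandas_df runs the query through the pandas-gbq connector defined below.
    df = hook.get_pandas_df('SELECT 1 AS x', dialect='legacy')
    # table_exists returns False (rather than raising) on a 404 from the API.
    exists = hook.table_exists('example-project', 'example_dataset',
                               'example_table')
    return df, exists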
class BigQueryPandasConnector(GbqConnector):
"""
This connector behaves identically to GbqConnector (from Pandas), except
that it allows the service to be injected, and disables a call to
self.get_credentials(). This allows Airflow to use BigQuery with Pandas
without forcing a three legged OAuth connection. Instead, we can inject
service account credentials into the binding.
"""
def __init__(self, project_id, service, reauth=False, verbose=False, dialect='legacy'):
gbq_check_google_client_version()
gbq_test_google_api_imports()
self.project_id = project_id
self.reauth = reauth
self.service = service
self.verbose = verbose
self.dialect = dialect
class BigQueryConnection(object):
"""
BigQuery does not have a notion of a persistent connection. Thus, these
objects are small stateless factories for cursors, which do all the real
work.
"""
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def close(self):
""" BigQueryConnection does not have anything to close. """
pass
def commit(self):
""" BigQueryConnection does not support transactions. """
pass
def cursor(self):
""" Return a new :py:class:`Cursor` object using the connection. """
return BigQueryCursor(*self._args, **self._kwargs)
def rollback(self):
raise NotImplementedError(
"BigQueryConnection does not have transactions")
class BigQueryBaseCursor(object):
"""
The BigQuery base cursor contains helper methods to execute queries against
BigQuery. The methods can be used directly by operators, in cases where a
PEP 249 cursor isn't needed.
"""
def __init__(self, service, project_id):
self.service = service
self.project_id = project_id
def run_query(
self, bql, destination_dataset_table = False,
write_disposition = 'WRITE_EMPTY',
allow_large_results=False,
udf_config = False,
use_legacy_sql=True):
"""
Executes a BigQuery SQL query. Optionally persists results in a BigQuery
table. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param bql: The BigQuery SQL to execute.
:type bql: string
:param destination_dataset_table: The dotted <dataset>.<table>
BigQuery table to save the query results.
:param write_disposition: What to do if the table already exists in
BigQuery.
:param allow_large_results: Whether to allow large results.
:type allow_large_results: boolean
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:type udf_config: list
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
:type use_legacy_sql: boolean
"""
configuration = {
'query': {
'query': bql,
'useLegacySql': use_legacy_sql
}
}
if destination_dataset_table:
assert '.' in destination_dataset_table, (
'Expected destination_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(destination_dataset_table)
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_dataset_table,
default_project_id=self.project_id)
configuration['query'].update({
'allowLargeResults': allow_large_results,
'writeDisposition': write_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
}
})
if udf_config:
assert isinstance(udf_config, list)
configuration['query'].update({
'userDefinedFunctionResources': udf_config
})
return self.run_with_configuration(configuration)
def run_extract( # noqa
self, source_project_dataset_table, destination_cloud_storage_uris,
compression='NONE', export_format='CSV', field_delimiter=',',
print_header=True):
"""
Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted <dataset>.<table>
BigQuery table to use as the source data.
:type source_project_dataset_table: string
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: string
:param export_format: File format to export.
:type export_format: string
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: string
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: boolean
"""
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
configuration = {
'extract': {
'sourceTable': {
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table,
},
'compression': compression,
'destinationUris': destination_cloud_storage_uris,
'destinationFormat': export_format,
}
}
if export_format == 'CSV':
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration['extract']['fieldDelimiter'] = field_delimiter
configuration['extract']['printHeader'] = print_header
return self.run_with_configuration(configuration)
def run_copy(self,
source_project_dataset_tables,
destination_project_dataset_table,
write_disposition='WRITE_EMPTY',
create_disposition='CREATE_IF_NEEDED'):
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
(project:|project.)<dataset>.<table>
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If <project> is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: (project:|project.)<dataset>.<table>
:type destination_project_dataset_table: string
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
"""
source_project_dataset_tables = (
[source_project_dataset_tables]
if not isinstance(source_project_dataset_tables, list)
else source_project_dataset_tables)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
source_project_dataset_tables_fixup.append({
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table
})
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table
}
}
}
return self.run_with_configuration(configuration)
def run_load(self,
destination_project_dataset_table,
schema_fields, source_uris,
source_format='CSV',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=',',
max_bad_records=0,
schema_update_options=()):
"""
Executes a BigQuery load command to load data from Google Cloud Storage
to BigQuery. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param destination_project_dataset_table:
The dotted (<project>.|<project>:)<dataset>.<table> BigQuery table to load
data into. If <project> is not included, project will be the project defined
in the connection json.
:type destination_project_dataset_table: string
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
:type schema_fields: list
:param source_uris: The source Google Cloud
            Storage URI (e.g. gs://some-bucket/some-file.txt). A single
            wildcard per object name can be used.
:type source_uris: list
:param source_format: File format to export.
:type source_format: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: string
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
        :param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:type schema_update_options: list
"""
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).sourceFormat
source_format = source_format.upper()
allowed_formats = ["CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS"]
if source_format not in allowed_formats:
raise ValueError("{0} is not a valid source format. "
"Please use one of the following types: {1}"
.format(source_format, allowed_formats))
# bigquery also allows you to define how you want a table's schema to change
# as a side effect of a load
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schemaUpdateOptions
allowed_schema_update_options = [
'ALLOW_FIELD_ADDITION',
"ALLOW_FIELD_RELAXATION"
]
if not set(allowed_schema_update_options).issuperset(set(schema_update_options)):
raise ValueError(
"{0} contains invalid schema update options. "
"Please only use one or more of the following options: {1}"
.format(schema_update_options, allowed_schema_update_options)
)
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id,
var_name='destination_project_dataset_table')
configuration = {
'load': {
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
},
'sourceFormat': source_format,
'sourceUris': source_uris,
'writeDisposition': write_disposition,
}
}
if schema_fields:
configuration['load']['schema'] = {
'fields': schema_fields
}
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError(
"schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'."
)
else:
logging.info(
"Adding experimental "
"'schemaUpdateOptions': {0}".format(schema_update_options)
)
configuration['load']['schemaUpdateOptions'] = schema_update_options
if source_format == 'CSV':
configuration['load']['skipLeadingRows'] = skip_leading_rows
configuration['load']['fieldDelimiter'] = field_delimiter
if max_bad_records:
configuration['load']['maxBadRecords'] = max_bad_records
return self.run_with_configuration(configuration)
def run_with_configuration(self, configuration):
"""
Executes a BigQuery SQL query. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about the configuration parameter.
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
"""
jobs = self.service.jobs()
job_data = {
'configuration': configuration
}
# Send query and wait for reply.
query_reply = jobs \
.insert(projectId=self.project_id, body=job_data) \
.execute()
job_id = query_reply['jobReference']['jobId']
# Wait for query to finish.
keep_polling_job = True
while (keep_polling_job):
try:
job = jobs.get(projectId=self.project_id, jobId=job_id).execute()
if (job['status']['state'] == 'DONE'):
keep_polling_job = False
# Check if job had errors.
if 'errorResult' in job['status']:
raise Exception(
'BigQuery job failed. Final error was: {}. The job was: {}'.format(
job['status']['errorResult'], job
)
)
else:
logging.info('Waiting for job to complete : %s, %s', self.project_id, job_id)
time.sleep(5)
except HttpError as err:
if err.resp.status in [500, 503]:
logging.info('%s: Retryable error, waiting for job to complete: %s', err.resp.status, job_id)
time.sleep(5)
else:
raise Exception(
'BigQuery job status check failed. Final error was: %s', err.resp.status)
return job_id
def get_schema(self, dataset_id, table_id):
"""
        Get the schema for a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:return: a table schema
"""
tables_resource = self.service.tables() \
.get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \
.execute()
return tables_resource['schema']
def get_tabledata(self, dataset_id, table_id,
max_results=None, page_token=None, start_index=None):
"""
Get the data of a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: map containing the requested rows.
"""
optional_params = {}
if max_results:
optional_params['maxResults'] = max_results
if page_token:
optional_params['pageToken'] = page_token
if start_index:
optional_params['startIndex'] = start_index
return (
self.service.tabledata()
.list(
projectId=self.project_id, datasetId=dataset_id,
tableId=table_id, **optional_params)
.execute()
)
def run_table_delete(self, deletion_dataset_table, ignore_if_missing=False):
"""
Delete an existing table from the dataset;
If the table does not exist, return an error unless ignore_if_missing
is set to True.
:param deletion_dataset_table: A dotted
(<project>.|<project>:)<dataset>.<table> that indicates which table
will be deleted.
:type deletion_dataset_table: str
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:type ignore_if_missing: boolean
:return:
"""
assert '.' in deletion_dataset_table, (
'Expected deletion_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(deletion_dataset_table)
deletion_project, deletion_dataset, deletion_table = \
_split_tablename(table_input=deletion_dataset_table,
default_project_id=self.project_id)
try:
tables_resource = self.service.tables() \
.delete(projectId=deletion_project,
datasetId=deletion_dataset,
tableId=deletion_table) \
.execute()
logging.info('Deleted table %s:%s.%s.',
deletion_project, deletion_dataset, deletion_table)
except HttpError:
if not ignore_if_missing:
raise Exception(
'Table deletion failed. Table does not exist.')
else:
logging.info('Table does not exist. Skipping.')
def run_table_upsert(self, dataset_id, table_resource, project_id=None):
"""
        Creates a new, empty table in the dataset.
        If the table already exists, updates the existing table.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param dataset_id: the dataset to upsert the table into.
:type dataset_id: str
:param table_resource: a table resource. see
https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:type table_resource: dict
:param project_id: the project to upsert the table into. If None,
project will be self.project_id.
:return:
"""
# check to see if the table exists
table_id = table_resource['tableReference']['tableId']
project_id = project_id if project_id is not None else self.project_id
tables_list_resp = self.service.tables().list(projectId=project_id,
datasetId=dataset_id).execute()
while True:
for table in tables_list_resp.get('tables', []):
if table['tableReference']['tableId'] == table_id:
# found the table, do update
logging.info('table %s:%s.%s exists, updating.',
project_id, dataset_id, table_id)
return self.service.tables().update(projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=table_resource).execute()
# If there is a next page, we need to check the next page.
if 'nextPageToken' in tables_list_resp:
tables_list_resp = self.service.tables()\
.list(projectId=project_id,
datasetId=dataset_id,
pageToken=tables_list_resp['nextPageToken'])\
.execute()
# If there is no next page, then the table doesn't exist.
else:
# do insert
logging.info('table %s:%s.%s does not exist. creating.',
project_id, dataset_id, table_id)
return self.service.tables().insert(projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute()
def run_grant_dataset_view_access(self,
source_dataset,
view_dataset,
view_table,
source_project = None,
view_project = None):
"""
Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_dataset: the source dataset
:type source_dataset: str
:param view_dataset: the dataset that the view is in
:type view_dataset: str
:param view_table: the table of the view
:type view_table: str
:param source_project: the project of the source dataset. If None,
self.project_id will be used.
:type source_project: str
:param view_project: the project that the view is in. If None,
self.project_id will be used.
:type view_project: str
:return: the datasets resource of the source dataset.
"""
# Apply default values to projects
source_project = source_project if source_project else self.project_id
view_project = view_project if view_project else self.project_id
# we don't want to clobber any existing accesses, so we have to get
# info on the dataset before we can add view access
source_dataset_resource = self.service.datasets().get(projectId=source_project,
datasetId=source_dataset).execute()
access = source_dataset_resource['access'] if 'access' in source_dataset_resource else []
view_access = {'view': {'projectId': view_project,
'datasetId': view_dataset,
'tableId': view_table}}
# check to see if the view we want to add already exists.
if view_access not in access:
logging.info('granting table %s:%s.%s authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table,
source_project, source_dataset)
access.append(view_access)
return self.service.datasets().patch(projectId=source_project,
datasetId=source_dataset,
body={'access': access}).execute()
else:
# if view is already in access, do nothing.
logging.info('table %s:%s.%s already has authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table,
source_project, source_dataset)
return source_dataset_resource
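# Illustrative usage sketch, not part of the original module. The `service`
# argument is assumed to be an authorized BigQuery service object (e.g. from
# BigQueryHook.get_service()); dataset, table, and bucket names are placeholders.
def _example_base_cursor_usage(service, project_id):
    """Sketch: run a query into a destination table, then export it to GCS."""
    cursor = BigQueryBaseCursor(service, project_id)
    cursor.run_query(
        'SELECT 1 AS x',
        destination_dataset_table='example_dataset.example_table',
        write_disposition='WRITE_TRUNCATE',
        allow_large_results=True)
    return cursor.run_extract(
        'example_dataset.example_table',
        ['gs://example-bucket/example_export.csv'],
        export_format='CSV',
        print_header=True)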
class BigQueryCursor(BigQueryBaseCursor):
"""
A very basic BigQuery PEP 249 cursor implementation. The PyHive PEP 249
implementation was used as a reference:
https://github.com/dropbox/PyHive/blob/master/pyhive/presto.py
https://github.com/dropbox/PyHive/blob/master/pyhive/common.py
"""
def __init__(self, service, project_id):
super(BigQueryCursor, self).__init__(service=service, project_id=project_id)
self.buffersize = None
self.page_token = None
self.job_id = None
self.buffer = []
self.all_pages_loaded = False
@property
def description(self):
""" The schema description method is not currently implemented. """
raise NotImplementedError
def close(self):
""" By default, do nothing """
pass
@property
def rowcount(self):
""" By default, return -1 to indicate that this is not supported. """
return -1
def execute(self, operation, parameters=None):
"""
Executes a BigQuery query, and returns the job ID.
:param operation: The query to execute.
:type operation: string
:param parameters: Parameters to substitute into the query.
:type parameters: dict
"""
bql = _bind_parameters(operation, parameters) if parameters else operation
self.job_id = self.run_query(bql)
def executemany(self, operation, seq_of_parameters):
"""
Execute a BigQuery query multiple times with different parameters.
:param operation: The query to execute.
:type operation: string
:param parameters: List of dictionary parameters to substitute into the
query.
:type parameters: list
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters)
def fetchone(self):
""" Fetch the next row of a query result set. """
return self.next()
def next(self):
"""
Helper method for fetchone, which returns the next row from a buffer.
If the buffer is empty, attempts to paginate through the result set for
the next page, and load it into the buffer.
"""
if not self.job_id:
return None
if len(self.buffer) == 0:
if self.all_pages_loaded:
return None
query_results = (
self.service.jobs()
.getQueryResults(
projectId=self.project_id,
jobId=self.job_id,
pageToken=self.page_token)
.execute()
)
if 'rows' in query_results and query_results['rows']:
self.page_token = query_results.get('pageToken')
fields = query_results['schema']['fields']
col_types = [field['type'] for field in fields]
rows = query_results['rows']
for dict_row in rows:
typed_row = ([
_bq_cast(vs['v'], col_types[idx])
for idx, vs in enumerate(dict_row['f'])
])
self.buffer.append(typed_row)
if not self.page_token:
self.all_pages_loaded = True
else:
# Reset all state since we've exhausted the results.
self.page_token = None
self.job_id = None
self.page_token = None
return None
return self.buffer.pop(0)
def fetchmany(self, size=None):
"""
Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a
list of tuples). An empty sequence is returned when no more rows are available.
The number of rows to fetch per call is specified by the parameter. If it is not given, the
cursor's arraysize determines the number of rows to be fetched. The method should try to
fetch as many rows as indicated by the size parameter. If this is not possible due to the
specified number of rows not being available, fewer rows may be returned.
An :py:class:`~pyhive.exc.Error` (or subclass) exception is raised if the previous call to
:py:meth:`execute` did not produce any result set or no call was issued yet.
"""
if size is None:
size = self.arraysize
result = []
for _ in range(size):
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def fetchall(self):
"""
Fetch all (remaining) rows of a query result, returning them as a sequence of sequences
(e.g. a list of tuples).
"""
result = []
while True:
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def get_arraysize(self):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
return self._buffersize if self.buffersize else 1
def set_arraysize(self, arraysize):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
self.buffersize = arraysize
arraysize = property(get_arraysize, set_arraysize)
def setinputsizes(self, sizes):
""" Does nothing by default """
pass
def setoutputsize(self, size, column=None):
""" Does nothing by default """
pass
def _bind_parameters(operation, parameters):
""" Helper method that binds parameters to a SQL query. """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
for (name, value) in parameters.iteritems():
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
def _escape(s):
""" Helper method that escapes parameters to a SQL query. """
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e
def _bq_cast(string_field, bq_type):
"""
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER' or bq_type == 'TIMESTAMP':
return int(string_field)
elif bq_type == 'FLOAT':
return float(string_field)
elif bq_type == 'BOOLEAN':
assert string_field in set(['true', 'false'])
return string_field == 'true'
else:
return string_field
def _split_tablename(table_input, default_project_id, var_name=None):
assert default_project_id is not None, "INTERNAL: No default project is specified"
def var_print(var_name):
if var_name is None:
return ""
else:
return "Format exception for {var}: ".format(var=var_name)
if table_input.count('.') + table_input.count(':') > 3:
raise Exception((
'{var}Use either : or . to specify project '
'got {input}'
).format(var=var_print(var_name), input=table_input))
cmpt = table_input.rsplit(':', 1)
project_id = None
rest = table_input
if len(cmpt) == 1:
project_id = None
rest = cmpt[0]
elif len(cmpt) == 2 and cmpt[0].count(':') <= 1:
if cmpt[-1].count('.') != 2:
project_id = cmpt[0]
rest = cmpt[1]
else:
raise Exception((
'{var}Expect format of (<project:)<dataset>.<table>, '
'got {input}'
).format(var=var_print(var_name), input=table_input))
cmpt = rest.split('.')
if len(cmpt) == 3:
assert project_id is None, (
"{var}Use either : or . to specify project"
).format(var=var_print(var_name))
project_id = cmpt[0]
dataset_id = cmpt[1]
table_id = cmpt[2]
elif len(cmpt) == 2:
dataset_id = cmpt[0]
table_id = cmpt[1]
else:
raise Exception((
'{var}Expect format of (<project.|<project:)<dataset>.<table>, '
'got {input}'
).format(var=var_print(var_name), input=table_input))
if project_id is None:
if var_name is not None:
logging.info(
'project not included in {var}: '
'{input}; using project "{project}"'.format(
var=var_name, input=table_input, project=default_project_id))
project_id = default_project_id
return project_id, dataset_id, table_id
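# Illustrative expectations for _split_tablename, not part of the original
# module; the project, dataset, and table strings below are made up.
def _split_tablename_examples():
    """Sketch: how project-qualified and bare table strings are split."""
    assert _split_tablename('example-project:example_dataset.example_table',
                            'default-project') == (
        'example-project', 'example_dataset', 'example_table')
    # Without an explicit project, the default project id is used.
    assert _split_tablename('example_dataset.example_table',
                            'default-project') == (
        'default-project', 'example_dataset', 'example_table')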
| apache-2.0 |
rouseguy/scipy2015_tutorial | check_env.py | 6 | 2002 | problems = 0
try:
import IPython
print('IPython', IPython.__version__)
assert(IPython.__version__ >= '3.0')
except ImportError:
print("IPython version 3 is not installed. Please install via pip or conda.")
problems += 1
try:
import numpy
print('NumPy', numpy.__version__)
assert(numpy.__version__ >= '1.9')
except ImportError:
print("Numpy version 1.9 or greater is not installed. Please install via pip or conda.")
problems += 1
try:
import pandas
print('pandas', pandas.__version__)
assert(pandas.__version__ >= '0.16')
except ImportError:
print("pandas version 0.16 or greater is not installed. Please install via pip or conda.")
problems += 1
try:
import scipy
print('SciPy', scipy.__version__)
except ImportError:
print("SciPy is not installed. Please install via pip or conda.")
problems += 1
try:
import matplotlib
print('matplotlib', matplotlib.__version__)
except ImportError:
print("matplotlib is not installed. Please install via pip or conda.")
problems += 1
try:
import theano
print('Theano', theano.__version__)
except ImportError:
print("Theano is not installed. Please install via pip or conda.")
problems += 1
try:
import pymc3
print('PyMC', pymc3.__version__)
except ImportError:
print("PyMC 3 is not installed. Please install via pip:\npip install -U git+git://github.com/pymc-devs/pymc3.git")
problems += 1
try:
import sklearn
print('scikit-learn', sklearn.__version__)
except ImportError:
print("scikit-learn is not installed. Please install via pip or conda.")
problems += 1
try:
import patsy
print('patsy', patsy.__version__)
except ImportError:
print("patsy is not installed. Please install via pip or conda.")
problems += 1
if not problems:
print("\nEverything's cool")
else:
print('There are', problems, 'problems. Please ensure all required components are installed.') | cc0-1.0 |
jkibele/OpticalRS | OpticalRS/Lyzenga2006.py | 1 | 23015 | # -*- coding: utf-8 -*-
"""
Lyzenga2006
===========
This module implements methods described in Lyzenga et al. 2006. The methods
implemented so far are mostly the image preprocessing steps.
This implementation is the work of the author of this code (Jared Kibele), not
the authors of the original paper. I tried to get it right but I'm not making
any promises. Please check your results and let me know if you find any
problems.
References
----------
Lyzenga, D.R., Malinas, N.P., Tanis, F.J., 2006. Multispectral bathymetry using
a simple physically based algorithm. Geoscience and Remote Sensing, IEEE
Transactions on 44, 2251 –2259. doi:10.1109/TGRS.2006.872909
Armstrong, R.A., 1993. Remote sensing of submerged vegetation canopies for
biomass estimation. International Journal of Remote Sensing 14, 621–627.
doi:10.1080/01431169308904363
Ji, W., Civco, D., Kennard, W., 1992. Satellite remote bathymetry: a new
mechanism for modeling. Photogrammetric Engineering and Remote Sensing 58,
545–549.
Notes
-----
I developed this in ClassificationDev/Lyzenga/Lyzenga2006/DeepWaterMasking.ipynb
"""
import numpy as np
from skimage.filters import rank
from skimage import morphology
from sklearn.linear_model import LinearRegression
import itertools
from collections import OrderedDict
def get_fit( ind, x_train, y_train, n_jobs=4 ):
"""
Get a linear regression fit object from Scikit-learn for eq. 9 from Lyzenga
et al. 2006.
Parameters
----------
ind : list of integers
The zero-indexed band numbers to use in the linear regression.
x_train : np.ma.MaskedArray
The image array. If you're following Lyzenga et al. 2006, these will be
radiance values transformed according to Lyzenga 1978 eq. 7 (see Lyzenga
et al. 2006 eq. 8).
y_train : np.array or MaskedArray
The measured depths that correspond to the pixels in `x_train`. This
array must have the same dimensions as a single band of `x_train`.
Returns
-------
fit object
For information on this object see: http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html
"""
if x_train.ndim > 2:
nbands = x_train.shape[-1]
x_train = x_train.compressed().reshape(-1,nbands)
skols = LinearRegression(n_jobs=n_jobs)
skolsfit = skols.fit(x_train[...,ind],y_train.compressed())
return skolsfit
def get_selfscore( ind, x_train, y_train, n_jobs=4 ):
"""
Get the r^2 value from linear regression fit object from Scikit-learn for
eq. 9 from Lyzenga et al. 2006.
Parameters
----------
ind : list of integers
The zero-indexed band numbers to use in the linear regression.
x_train : np.ma.MaskedArray
The image array. If you're following Lyzenga et al. 2006, these will be
radiance values transformed according to Lyzenga 1978 eq. 7 (see Lyzenga
et al. 2006 eq. 8).
y_train : np.array or MaskedArray
The measured depths that correspond to the pixels in `x_train`. This
array must have the same dimensions as a single band of `x_train`.
Returns
-------
float
The r^2 value for the linear regression.
"""
if x_train.ndim > 2:
nbands = x_train.shape[-1]
x_train = x_train.compressed().reshape(-1,nbands)
fit = get_fit( ind, x_train, y_train, n_jobs=n_jobs )
return fit.score( x_train[...,ind], y_train.compressed() )
def ranked_combos(x_img,y_depths,n=2):
"""
Rank all possible combinations of `n` bands from `x_img` based on r^2 values
from eq. 9 (Lyzenga et al. 2006).
Parameters
----------
x_img : np.array or MaskedArray
The image array. If you're following Lyzenga et al. 2006, these will be
radiance values transformed according to Lyzenga 1978 eq. 7 (see Lyzenga
et al. 2006 eq. 8).
y_depths : np.array or MaskedArray
The measured depths that correspond to the pixels in `x_img`. This array
must have the same dimensions as a single band of `x_img`.
n : int, optional
The number of bands to use in combos. This must not exceed the number of
bands in `x_img`.
Returns
-------
OrderedDict
The dict keys are the r^2 values and the dict values are the zero
indexed band combinations in list format. The best band combo (the one
with the highest r^2) will be the first item in the dict.
"""
od = OrderedDict()
nbands = x_img.shape[-1]
for comb in itertools.combinations( range(nbands), n ):
od[ get_selfscore(comb,x_img,y_depths) ] = comb
od_sort = sorted( od.items(), key=lambda t: t[0], reverse=True )
return OrderedDict(od_sort)
def best_combo(x_img,y_depths,n=2):
"""
Evaluate all combinations of `n` bands from `x_img` for regression against
`y_depths` and return the best band combo as a tuple of zero-indexed band
numbers. "Best" is determined by choosing the band combo with the greatest
r^2 value.
Parameters
----------
x_img : np.array or MaskedArray
The image array. If you're following Lyzenga et al. 2006, these will be
radiance values transformed according to Lyzenga 1978 eq. 7 (see Lyzenga
et al. 2006 eq. 8).
y_depths : np.array or MaskedArray
The measured depths that correspond to the pixels in `x_img`. This array
must have the same dimensions as a single band of `x_img`.
n : int, optional
The number of bands to use in combos. This must not exceed the number of
bands in `x_img`.
Returns
-------
tuple of ints
This will be the best band combo to use for depth regression. The length
of this tuple will be `n`.
"""
return ranked_combos(x_img,y_depths,n).values()[0]
def tuned_linear_model(x_img,y_depths,n=2,n_jobs=4):
"""
Find the best combo of `n` bands from `x_img` and return a model tuned to
the training data (`x_img` and `y_depths`).
Parameters
----------
x_img : np.array or MaskedArray
The image array. If you're following Lyzenga et al. 2006, these will be
radiance values transformed according to Lyzenga 1978 eq. 7 (see Lyzenga
et al. 2006 eq. 8).
y_depths : np.array or MaskedArray
The measured depths that correspond to the pixels in `x_img`. This array
must have the same dimensions as a single band of `x_img`.
n : int, optional
The number of bands to use in the estimation model. Must not exceed the
number of bands in `x_img`.
Returns
-------
fit object
For information on this object see:
http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html
"""
ind = best_combo(x_img,y_depths,n)
return get_fit(ind,x_img,y_depths,n_jobs=n_jobs)
def fit_and_predict(x_train,y_train,x_img,n=2,n_jobs=4):
"""
Build Lyzenga depth estimation model trained on `x_train` (transformed
imagery) and `y_train` (depths) and use it to estimate depths for `x_img`.
Parameters
----------
x_train : np.ma.MaskedArray
The image array subset to be used for training the model. If you're
following Lyzenga et al. 2006, these will be radiance values transformed
according to Lyzenga 1978 eq. 7 (see Lyzenga et al. 2006 eq. 8).
y_train : np.array or MaskedArray
The measured depths that correspond to the pixels in `x_train`. This
array must have the same dimensions as a single band of `x_train`.
x_img : np.array or MaskedArray
The image array for which you want depth estimates. Must have the same
dimensions and preprocessing as `x_train`.
n : int, optional
The number of bands to use in the estimation model. Must not exceed the
number of bands in `x_img`.
Returns
-------
np.array
Predicted depths for `x_img`.
"""
outarr = x_img[...,0].copy()
ind = best_combo(x_train,y_train,n)
modl = get_fit(ind,x_train,y_train,n_jobs=n_jobs)
pred = modl.predict(x_img[...,ind].compressed().reshape(-1,n))
outarr[~outarr.mask] = pred
return outarr
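# Illustrative workflow sketch, not part of the original module. `x_train`,
# `y_train`, and `x_img` are assumed to be MaskedArrays prepared as described
# in the docstrings above (Lyzenga 1978 eq. 7 transformed radiances plus the
# matching measured depths); the names are placeholders.
def _example_depth_workflow(x_train, y_train, x_img, n=2):
    """Sketch: rank band combos, keep the best one, and predict depths."""
    ranked = ranked_combos(x_train, y_train, n=n)  # r^2 -> band combo, best first
    best = list(ranked.values())[0]                # combo with the highest r^2
    model = get_fit(best, x_train, y_train)        # eq. 9 linear model
    depths = fit_and_predict(x_train, y_train, x_img, n=n)
    return best, model, depths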
## Deep water masking --------------------------------------------------------
# These methods are implementations of the preprocessing steps in section V
# of Lyzenga et al. 2006.
def dark_pixels( imarr, p=10 ):
"""
Return a single band boolean array with all pixels <= the `p` percentile of
brightness (across all bands) marked as `True`. All other pixels marked as
`False`. This method was developed to carry out the following step from
section V of Lyzenga et al. 2006:
"A first estimate of the mean deep-water radiance is calculated by first
identifying the tenth-percentile brightness magnitude (using all four
bands) within the water area of interest."
Parameters
----------
imarr : numpy array (RxCxBands shape)
The multispectral image array. See `OpticalRS.RasterDS` for more info.
p : int or float (Default value = 10)
The percentile of brightness to use as the threshold for declaring a
        pixel 'dark'. Lyzenga et al. 2006 used the 10th percentile so that's
the default.
Returns
-------
boolean array
True where pixels are <= `p` percentile of brightness and False for all
other pixels.
"""
# average DNs to get (RxCx1) brightness
brt = imarr.mean(axis=2)
dim_thresh = np.percentile( brt.compressed(), p )
dark_pix = ( brt <= dim_thresh )
if np.ma.is_masked( dark_pix ):
dark_pix.set_fill_value( False )
dark_pix = dark_pix.filled()
return dark_pix
def moving_window( dark_arr, win_size=3 ):
"""
Find average value of pixels in a square window. Used on a boolean array,
this can be used to find the percentage of pixels marked as `True`.
Parameters
----------
dark_arr : boolean array
This is an (RxCx1) array of pixels that are considered dark. The
`dark_pixels` method in this module can be used to create this array.
win_size : int (Default value = 3)
The size of the moving window to be used. Lyzenga et al. 2006 uses a
3x3 window so the default is 3.
Returns
-------
array
An array the same shape as `dark_arr` with values representing the
proportion of pixels in the surrounding window that are `True` in
`dark_arr`.
"""
win = morphology.square( win_size )
npix = win.size
if np.ma.is_masked( dark_arr ):
outarr = rank.sum( dark_arr.filled().astype('uint8'), win ) / float( npix )
outarr = np.ma.MaskedArray( outarr, mask=dark_arr.mask, fill_value=dark_arr.fill_value )
else:
outarr = rank.sum( dark_arr.astype('uint8'), win ) / float( npix )
return outarr
def dark_kernels( imarr, p=10, win_size=3, win_percentage=50 ):
"""
Return a single band boolean image array where pixels are `True` if at
least 50% of the surrounding pixels are at or below the `p` percentile of
image brightness. This is an implementation of the following sentence from
Lyzenga et al. 2006:
"A moving window is then passed through the image to identify kernels that
contain more than 50% of pixels at or below the 10% brightness threshold."
Parameters
----------
imarr : numpy array (RxCxBands shape)
The multispectral image array. See `OpticalRS.RasterDS` for more info.
p : int or float (Default value = 10)
The percentile of brightness to use as the threshold for declaring a
        pixel 'dark'. Lyzenga et al. 2006 used the 10th percentile so that's
the default.
win_size : int (Default value = 3)
The size of the moving window to be used. Lyzenga et al. 2006 uses a
3x3 window so the default is 3.
win_percentage : int or float (Default value = 50)
The percentage of the moving window that must be at or below the
threshold. Lyzenga et al. 2006 used 50% so that's the default.
Returns
-------
numpy boolean array
An (RxC) shaped boolean array. `True` values are "dark kernels".
"""
dps = dark_pixels( imarr, p=p )
if win_size:
dmeans = moving_window( dps, win_size=win_size )
dps = dmeans >= (win_percentage/100.0)
return dps.astype('bool')
def dark_pixel_array( imarr, p=10, win_size=3, win_percentage=50 ):
"""
Return a masked version of imarr where only the "dark kernels" described in
section V of Lyzenga et al. 2006 are left unmasked.
Parameters
----------
imarr : numpy array (RxCxBands shape)
The multispectral image array. See `OpticalRS.RasterDS` for more info.
p : int or float (Default value = 10)
The percentile of brightness to use as the threshold for declaring a
        pixel 'dark'. Lyzenga et al. 2006 used the 10th percentile so that's
the default.
win_size : int (Default value = 3)
The size of the moving window to be used. Lyzenga et al. 2006 uses a
3x3 window so the default is 3.
win_percentage : int or float (Default value = 50)
The percentage of the moving window that must be at or below the
threshold. Lyzenga et al. 2006 used 50% so that's the default.
Returns
-------
numpy MaskedArray
A masked version of imarr where only the "dark kernels" described in
section V of Lyzenga et al. 2006 are left unmasked.
"""
nbands = imarr.shape[-1]
dp = dark_kernels( imarr, p, win_size, win_percentage )
dparr = imarr.copy()
dparr.mask = ~np.repeat( np.atleast_3d(dp), nbands, 2 )
return dparr
def deep_water_means(imarr, n_std=0, p=10, win_size=3, win_percentage=50):
"""
Return deep water mean values for each band of `imarr`.
Parameters
----------
imarr : numpy array (RxCxBands shape)
The multispectral image array. See `OpticalRS.RasterDS` for more info.
n_std : int or float
The number of standard deviations to subtract from the deep water means
before returning them. This can get you around the 'over deduction'
problem described by Ji et al. 1992. See Armstrong 1993 for an example
of this (though he attributes it to sensor noise).
p : int or float (Default value = 10)
The percentile of brightness to use as the threshold for declaring a
        pixel 'dark'. Lyzenga et al. 2006 used the 10th percentile so that's
the default.
win_size : int (Default value = 3)
The size of the moving window to be used. Lyzenga et al. 2006 uses a
3x3 window so the default is 3.
win_percentage : int or float (Default value = 50)
The percentage of the moving window that must be at or below the
threshold. Lyzenga et al. 2006 used 50% so that's the default.
"""
nbands = imarr.shape[-1]
dpa = dark_pixel_array(imarr, p=p, win_size=win_size,
win_percentage=win_percentage)
dpaavg = dpa.reshape(-1, nbands).mean(0).data
dpastd = dpa.reshape(-1, nbands).std(0).data
Rinf = dpaavg - (n_std * dpastd)
return Rinf
def bg_thresholds( dark_arr, n_std=3 ):
"""
Calculate band-wise mean radiance plus 3 standard deviations for pixels in
`dark_arr`. Lyzenga et al. 2006 says:
"...the blue and green bands are thresholded at the deep-water mean
radiance plus three standard deviations."
This method will calculate the mean + 3 std for all bands. You'll have to
pick out the blue and green ones later if that's what you're after.
Parameters
----------
dark_arr : numpy Masked Array
Typically, this will be the output of `Lyzenga2006.dark_pixels_array`.
n_std : int (Default value = 3)
The number of standard deviations to add to the mean. Lyzenga et al.
2006 uses 3 so that's the default.
Returns
-------
numpy array
A 1D array with as many elements as there are bands in `dark_arr`. Each
element corresponds to the threshold for its respective band.
"""
nbands = dark_arr.shape[-1]
darkmeans = dark_arr.reshape(-1,nbands).mean(0).data
darkstds = dark_arr.reshape(-1,nbands).std(0).data
return darkmeans + n_std * darkstds
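# Illustrative sketch of the section-V deep-water preprocessing chain, not part
# of the original module. `imarr` is assumed to be a (rows, cols, bands)
# MaskedArray covering only the water area of interest; the name is a placeholder.
def _example_deep_water_masking(imarr):
    """Sketch: dark kernels, deep-water means, and blue/green thresholds."""
    kernels = dark_kernels(imarr, p=10, win_size=3, win_percentage=50)
    dw_means = deep_water_means(imarr, n_std=0)
    thresholds = bg_thresholds(dark_pixel_array(imarr), n_std=3)
    return kernels, dw_means, thresholds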
## Glint correction ----------------------------------------------------------
# These methods are derived from section III of Lyzenga et al. 2006.
def nir_mean(msarr,nir_band=7):
"""
Calculate the mean of the (unmasked) values of the NIR (near infrared) band
of an image array. The default `nir_band` value of 7 selects the NIR2 band
in WorldView-2 imagery. If you're working with a different type of imagery,
you will need figure out the appropriate value to use instead.
Parameters
----------
msarr : numpy array (RxCxBands shape)
The multispectral image array. See `OpticalRS.RasterDS` for more info.
nir_band : int (Default value = 7)
The default `nir_band` value of 7 selects the NIR2 band in WorldView-2
imagery. If you're working with a different type of imagery, you will
need figure out the appropriate value to use instead. This is a zero
indexed number (the first band is 0, not 1).
Returns
-------
float
The mean radiance in the NIR band.
"""
return msarr[...,nir_band].mean()
def cov_ratio(msarr,band,nir_band=7):
"""
Calculate the r_ij value according to equation 5 from Lyzenga et al. 2006.
Parameters
----------
msarr : numpy array (RxCxBands shape)
The multispectral image array. See `OpticalRS.RasterDS` for more info.
band : int
The band to calculate r_ij for. Essentially, the value i in equation 5.
nir_band : int (Default value = 7)
The default `nir_band` value of 7 selects the NIR2 band in WorldView-2
imagery. If you're working with a different type of imagery, you will
need figure out the appropriate value to use instead. This is a zero
indexed number (the first band is 0, not 1).
Returns
-------
float
The covariance ratio r_ij described in equation 5 of Lyzenga et al.
2006.
"""
if np.ma.is_masked( msarr ):
b = msarr[...,band].compressed()
nir_b = msarr[...,nir_band].compressed()
else:
b = msarr[...,band].flatten()
nir_b = msarr[...,nir_band].flatten()
cov_mat = np.cov( b, nir_b, bias=1 )
return cov_mat[0,1] / cov_mat[1,1]
def cov_ratios(msarr,nir_band=7):
"""
Calculate the r_ij value according to equation 5 from Lyzenga et al. 2006
for each band of an image array.
Parameters
----------
msarr : numpy array (RxCxBands shape)
The multispectral image array. See `OpticalRS.RasterDS` for more info.
nir_band : int (Default value = 7)
The default `nir_band` value of 7 selects the NIR2 band in WorldView-2
imagery. If you're working with a different type of imagery, you will
need figure out the appropriate value to use instead. This is a zero
indexed number (the first band is 0, not 1).
Returns
-------
numpy array
An array of r_ij values calculated by `OpticalRS.Lyzenga2006.cov_ratio`.
One r_ij value for each band of `msarr`.
"""
nbands = msarr.shape[-1] #assume Rows,Cols,Bands shape
bands = list(range(nbands))
cov_rats = []
if nir_band in bands: bands.remove(nir_band)
for band in bands:
cov_rat = cov_ratio(msarr,band,nir_band)
cov_rats.append(cov_rat)
return np.array(cov_rats)
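# Minimal sketch (synthetic data): the covariance ratio r_ij computed by
# `cov_ratio` is cov(band_i, NIR) / var(NIR). A band constructed as
# 0.5 * NIR + noise should therefore recover a ratio close to 0.5.
def _example_cov_ratio():
    rng = np.random.default_rng(1)
    nir = rng.normal(100.0, 10.0, size=(50, 50))
    band = 0.5 * nir + rng.normal(0.0, 1.0, size=(50, 50))
    arr = np.dstack([band, nir])  # Rows x Cols x 2 array; NIR is band index 1
    return cov_ratio(arr, band=0, nir_band=1)  # expected to be near 0.5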
def glint_correct_image(imarr, glintarr, nir_band=7):
"""
Apply the sunglint removal algorithm from section III of Lyzenga et al.
2006 to a multispectral image array.
Parameters
----------
imarr : numpy array (RxCxBands shape)
The multispectral image array. See `OpticalRS.RasterDS` for more info.
glintarr : numpy array
A subset of `imarr` from an optically deep location with sun glint.
nir_band : int (Default value = 7)
The default `nir_band` value of 7 selects the NIR2 band in WorldView-2
imagery. If you're working with a different type of imagery, you will
need to figure out the appropriate value to use instead. This is a zero
indexed number (the first band is 0, not 1).
Returns
-------
numpy array
A de-glinted copy of `imarr`.
Notes
-----
This deglinting method may not work well on WorldView-2 imagery because the
bands are not captured exactly concurrently. See section II B of Eugenio et
al. 2015 [1]_ for more information and a different sunglint correction
algorithm that may be more appropriate.
References
----------
.. [1] Eugenio, F., Marcello, J., Martin, J., 2015. High-Resolution Maps of
Bathymetry and Benthic Habitats in Shallow-Water Environments Using
Multispectral Remote Sensing Imagery. IEEE Transactions on Geoscience
and Remote Sensing 53, 3539–3549. doi:10.1109/TGRS.2014.2377300
"""
# calculate the covariance ratios
cov_rats = cov_ratios(glintarr,nir_band)
# get the NIR mean
nirm = nir_mean(glintarr,nir_band)
# we don't want to try to apply the correction
# to the NIR band
nbands = imarr.shape[-1]
bands = list(range(nbands))
bands.remove(nir_band)
outarr = imarr.copy()
for i,band in enumerate(bands):
outarr[:,:,band] = imarr[:,:,band] - cov_rats[i] * ( imarr[:,:,nir_band] - nirm )
# this will leave the NIR band unchanged
return outarr
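# Usage sketch (synthetic arrays): deglinting an image with a glint sample
# taken from a region assumed to be optically deep water. Both arrays are
# Rows x Cols x Bands with the NIR band at index 7.
def _example_glint_correct():
    rng = np.random.default_rng(2)
    imarr = rng.normal(100.0, 10.0, size=(64, 64, 8))
    glintarr = imarr[:16, :16, :]  # stand-in for a deep-water glint subset
    deglinted = glint_correct_image(imarr, glintarr, nir_band=7)
    return deglinted.shape  # same shape as the input image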
def glint_correct_wv2(imrds, glintgeom):
"""
This is a special case of the sunglint method for WorldView-2 imagery. The
WV2 multispectral bands are divided into two groups, MS1 and MS2. The two
groups are captured at slightly different times. This means that not all the
visible bands covary with both NIR bands. This method splits the WV2 bands
and applies the Lyzenga glint correction method to MS1 and MS2 groups
separately, and then reassembles the results.
Parameters
----------
imrds : OpticalRS.RasterDS
The raster dataset for the WV2 image that you'd like to correct.
glintgeom : shapely geometry
A polygon from within `imrds` that defines an area of optically deep
water with sun glint.
Returns
-------
barr : numpy image array
A version of `imrds` with sun glint removal applied.
"""
ms1_bands = [1,2,4,6]
ms2_bands = [0,3,5,7]
barr = imrds.band_array
garr = imrds.geometry_subset(glintgeom)
barr1 = barr[...,ms1_bands]
garr1 = garr[...,ms1_bands]
barr2 = barr[...,ms2_bands]
garr2 = garr[...,ms2_bands]
sg1 = glint_correct_image(barr1, garr1, 3)
sg2 = glint_correct_image(barr2, garr2, 3)
barr[...,ms1_bands] = sg1
barr[...,ms2_bands] = sg2
return barr
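# Usage sketch: `imrds` is assumed to be an OpticalRS.RasterDS for a WorldView-2
# scene and `glint_polygon` a shapely polygon over optically deep, glinted water
# (both names are hypothetical; their construction is not shown here).
#
#     deglinted = glint_correct_wv2(imrds, glint_polygon)
#     # MS1 bands [1, 2, 4, 6] and MS2 bands [0, 3, 5, 7] are corrected separately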
| bsd-3-clause |
saimn/astropy | docs/conf.py | 2 | 12625 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory.
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
# See sphinx_astropy.conf for which values are set there.
import os
import sys
import configparser
from datetime import datetime
from packaging.requirements import Requirement
try:
import importlib.metadata as importlib_metadata
except ImportError:
import importlib_metadata
try:
from sphinx_astropy.conf.v1 import * # noqa
except ImportError:
print('ERROR: the documentation requires the sphinx-astropy package to be installed')
sys.exit(1)
plot_rcparams = {}
plot_rcparams['figure.figsize'] = (6, 6)
plot_rcparams['savefig.facecolor'] = 'none'
plot_rcparams['savefig.bbox'] = 'tight'
plot_rcparams['axes.labelsize'] = 'large'
plot_rcparams['figure.subplot.hspace'] = 0.5
plot_apply_rcparams = True
plot_html_show_source_link = False
plot_formats = ['png', 'svg', 'pdf']
# Don't use the default - which includes a numpy and matplotlib import
plot_pre_code = ""
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.7'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("X.Y.Z")` here.
check_sphinx_version("1.2.1") # noqa: F405
# The intersphinx_mapping in sphinx_astropy.sphinx refers to astropy for
# the benefit of other packages who want to refer to objects in the
# astropy core. However, we don't want to cyclically reference astropy in its
# own build so we remove it here.
del intersphinx_mapping['astropy'] # noqa: F405
# add any custom intersphinx for astropy
intersphinx_mapping['pyerfa'] = ('https://pyerfa.readthedocs.io/en/stable/', None) # noqa: F405
intersphinx_mapping['pytest'] = ('https://pytest.readthedocs.io/en/stable/', None) # noqa: F405
intersphinx_mapping['ipython'] = ('https://ipython.readthedocs.io/en/stable/', None) # noqa: F405
intersphinx_mapping['pandas'] = ('https://pandas.pydata.org/pandas-docs/stable/', None) # noqa: F405, E501
intersphinx_mapping['sphinx_automodapi'] = ('https://sphinx-automodapi.readthedocs.io/en/stable/', None) # noqa: F405, E501
intersphinx_mapping['packagetemplate'] = ('https://docs.astropy.org/projects/package-template/en/latest/', None) # noqa: F405, E501
intersphinx_mapping['h5py'] = ('http://docs.h5py.org/en/stable/', None) # noqa: F405
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates') # noqa: F405
exclude_patterns.append('changes') # noqa: F405
exclude_patterns.append('_pkgtemplate.rst') # noqa: F405
exclude_patterns.append('**/*.inc.rst') # .inc.rst mean *include* files, don't have sphinx process them # noqa: F405, E501
# Add any paths that contain templates here, relative to this directory.
if 'templates_path' not in locals(): # in case parent conf.py defines it
templates_path = []
templates_path.append('_templates')
extensions += ["sphinx_changelog"] # noqa: F405
# Grab minversion from setup.cfg
setup_cfg = configparser.ConfigParser()
setup_cfg.read(os.path.join(os.path.pardir, 'setup.cfg'))
__minimum_python_version__ = setup_cfg['options']['python_requires'].replace('>=', '')
project = u'Astropy'
min_versions = {}
for line in importlib_metadata.requires('astropy'):
req = Requirement(line.split(';')[0])
min_versions[req.name.lower()] = str(req.specifier)
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog += "\n".join(
f".. |minimum_{name}_version| replace:: {min_versions[name]}"
for name in ('numpy', 'pyerfa', 'scipy', 'pyyaml', 'asdf', 'matplotlib', 'ipython')) + f"""
.. |minimum_python_version| replace:: {__minimum_python_version__}
.. Astropy
.. _`Astropy mailing list`: https://mail.python.org/mailman/listinfo/astropy
.. _`astropy-dev mailing list`: http://groups.google.com/group/astropy-dev
"""
# -- Project information ------------------------------------------------------
author = u'The Astropy Developers'
copyright = f'2011–{datetime.utcnow().year}, ' + author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The full version, including alpha/beta/rc tags.
release = importlib_metadata.version(project)
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
# Only include dev docs in dev version.
dev = 'dev' in release
if not dev:
exclude_patterns.append('development/*') # noqa: F405
exclude_patterns.append('testhelpers.rst') # noqa: F405
# -- Options for the module index ---------------------------------------------
modindex_common_prefix = ['astropy.']
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
#
# The global astropy configuration uses a custom theme,
# 'bootstrap-astropy', which is installed along with astropy. The
# theme has options for controlling the text of the logo in the upper
# left corner. This is how you would specify the options in order to
# override the theme defaults (The following options *are* the
# defaults, so we do not actually need to set them here.)
# html_theme_options = {
# 'logotext1': 'astro', # white, semi-bold
# 'logotext2': 'py', # orange, light
# 'logotext3': ':docs' # white, light
# }
# A different theme can be used, or other parts of this theme can be
# modified, by overriding some of the variables set in the global
# configuration. The variables set in the global configuration are
# listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
# html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
# html_theme = None
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = f'{project} v{release}'
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# A dictionary of values to pass into the template engine’s context for all pages.
html_context = {
'to_be_indexed': ['stable', 'latest'],
'is_development': dev
}
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
latex_logo = '_static/astropy_logo.pdf'
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
# Setting this URL is required by sphinx-astropy
github_issues_url = 'https://github.com/astropy/astropy/issues/'
edit_on_github_branch = 'main'
# Enable nitpicky mode - which ensures that all references in the docs
# resolve.
nitpicky = True
# This is not used. See docs/nitpick-exceptions file for the actual listing.
nitpick_ignore = []
for line in open('nitpick-exceptions'):
if line.strip() == "" or line.startswith("#"):
continue
dtype, target = line.split(None, 1)
target = target.strip()
nitpick_ignore.append((dtype, target))
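# The nitpick-exceptions file read above is expected to hold one
# "<domain:role> <target>" pair per line, with blank lines and "#" comments
# skipped. A hypothetical entry (illustrative only, not quoted from the real
# file) would look like:
#
#     py:class numpy.ma.core.MaskedArray
#
# which the loop turns into the tuple ('py:class', 'numpy.ma.core.MaskedArray')
# in nitpick_ignore.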
# -- Options for the Sphinx gallery -------------------------------------------
try:
import warnings
import sphinx_gallery # noqa: F401
extensions += ["sphinx_gallery.gen_gallery"] # noqa: F405
sphinx_gallery_conf = {
'backreferences_dir': 'generated/modules', # path to store the module using example template # noqa: E501
'filename_pattern': '^((?!skip_).)*$', # execute all examples except those that start with "skip_" # noqa: E501
'examples_dirs': f'..{os.sep}examples', # path to the examples scripts
'gallery_dirs': 'generated/examples', # path to save gallery generated examples
'reference_url': {
'astropy': None,
'matplotlib': 'https://matplotlib.org/',
'numpy': 'https://numpy.org/doc/stable/',
},
'abort_on_example_error': True
}
# Filter out backend-related warnings as described in
# https://github.com/sphinx-gallery/sphinx-gallery/pull/564
warnings.filterwarnings("ignore", category=UserWarning,
message='Matplotlib is currently using agg, which is a'
' non-GUI backend, so cannot show the figure.')
except ImportError:
sphinx_gallery = None
# -- Options for linkcheck output -------------------------------------------
linkcheck_retry = 5
linkcheck_ignore = ['https://journals.aas.org/manuscript-preparation/',
'https://maia.usno.navy.mil/',
'https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer',
'https://aa.usno.navy.mil/publications/docs/Circular_179.php',
'http://data.astropy.org',
r'https://github\.com/astropy/astropy/(?:issues|pull)/\d+']
linkcheck_timeout = 180
linkcheck_anchors = False
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = ['robots.txt']
def rstjinja(app, docname, source):
"""Render pages as a jinja template to hide/show dev docs. """
# Make sure we're outputting HTML
if app.builder.format != 'html':
return
files_to_render = ["index", "install"]
if docname in files_to_render:
print(f"Jinja rendering {docname}")
rendered = app.builder.templates.render_string(
source[0], app.config.html_context)
source[0] = rendered
def setup(app):
if sphinx_gallery is None:
msg = ('The sphinx_gallery extension is not installed, so the '
'gallery will not be built. You will probably see '
'additional warnings about undefined references due '
'to this.')
try:
app.warn(msg)
except AttributeError:
# Sphinx 1.6+
from sphinx.util import logging
logger = logging.getLogger(__name__)
logger.warning(msg)
# Generate the page from Jinja template
app.connect("source-read", rstjinja)
| bsd-3-clause |
oesteban/dipy | dipy/tests/test_scripts.py | 9 | 4292 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Test scripts
Run scripts and check outputs
"""
from __future__ import division, print_function, absolute_import
import os
import shutil
from os.path import (dirname, join as pjoin, abspath)
from nose.tools import assert_true, assert_false, assert_equal
import numpy.testing as nt
import nibabel as nib
from nibabel.tmpdirs import InTemporaryDirectory
from dipy.data import get_data
# Quickbundles command-line requires matplotlib:
try:
import matplotlib
no_mpl = False
except ImportError:
no_mpl = True
from .scriptrunner import ScriptRunner
runner = ScriptRunner(
script_sdir = 'bin',
debug_print_var = 'NIPY_DEBUG_PRINT')
run_command = runner.run_command
DATA_PATH = abspath(pjoin(dirname(__file__), 'data'))
def test_dipy_peak_extraction():
# test dipy_peak_extraction script
cmd = 'dipy_peak_extraction'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def test_dipy_fit_tensor():
# test dipy_fit_tensor script
cmd = 'dipy_fit_tensor'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def test_dipy_sh_estimate():
# test dipy_sh_estimate script
cmd = 'dipy_sh_estimate'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def assert_image_shape_affine(filename, shape, affine):
assert_true(os.path.isfile(filename))
image = nib.load(filename)
assert_equal(image.shape, shape)
nt.assert_array_almost_equal(image.get_affine(), affine)
def test_dipy_fit_tensor_again():
with InTemporaryDirectory():
dwi, bval, bvec = get_data("small_25")
# Copy data to tmp directory
shutil.copyfile(dwi, "small_25.nii.gz")
shutil.copyfile(bval, "small_25.bval")
shutil.copyfile(bvec, "small_25.bvec")
# Call script
cmd = ["dipy_fit_tensor", "--mask=none", "small_25.nii.gz"]
out = run_command(cmd)
assert_equal(out[0], 0)
# Get expected values
img = nib.load("small_25.nii.gz")
affine = img.get_affine()
shape = img.shape[:-1]
# Check expected outputs
assert_image_shape_affine("small_25_fa.nii.gz", shape, affine)
assert_image_shape_affine("small_25_t2di.nii.gz", shape, affine)
assert_image_shape_affine("small_25_dirFA.nii.gz", shape, affine)
assert_image_shape_affine("small_25_ad.nii.gz", shape, affine)
assert_image_shape_affine("small_25_md.nii.gz", shape, affine)
assert_image_shape_affine("small_25_rd.nii.gz", shape, affine)
with InTemporaryDirectory():
dwi, bval, bvec = get_data("small_25")
# Copy data to tmp directory
shutil.copyfile(dwi, "small_25.nii.gz")
shutil.copyfile(bval, "small_25.bval")
shutil.copyfile(bvec, "small_25.bvec")
# Call script
cmd = ["dipy_fit_tensor", "--save-tensor", "--mask=none", "small_25.nii.gz"]
out = run_command(cmd)
assert_equal(out[0], 0)
# Get expected values
img = nib.load("small_25.nii.gz")
affine = img.get_affine()
shape = img.shape[:-1]
# Check expected outputs
assert_image_shape_affine("small_25_fa.nii.gz", shape, affine)
assert_image_shape_affine("small_25_t2di.nii.gz", shape, affine)
assert_image_shape_affine("small_25_dirFA.nii.gz", shape, affine)
assert_image_shape_affine("small_25_ad.nii.gz", shape, affine)
assert_image_shape_affine("small_25_md.nii.gz", shape, affine)
assert_image_shape_affine("small_25_rd.nii.gz", shape, affine)
# small_25_tensor saves the tensor as a symmetric matrix following
# the nifti standard.
ten_shape = shape + (1, 6)
assert_image_shape_affine("small_25_tensor.nii.gz", ten_shape,
affine)
@nt.dec.skipif(no_mpl)
def test_qb_commandline():
with InTemporaryDirectory():
tracks_file = get_data('fornix')
cmd = ["dipy_quickbundles", tracks_file, '--pkl_file', 'mypickle.pkl',
'--out_file', 'tracks300.trk']
out = run_command(cmd)
assert_equal(out[0], 0)
| bsd-3-clause |
steinnp/Big-Data-Final | Classification/bayes_most_informative.py | 1 | 3013 | import nltk
import csv
import matplotlib.pyplot as plt
word_features = []
def get_words_in_tweets(tweets):
all_words = []
for (words, sentiment) in tweets:
all_words.extend(words)
return all_words
def get_word_features(wordlist):
wordlist = nltk.FreqDist(wordlist)
word_features = wordlist.keys()
return word_features
def extract_features(document):
document_words = set(document)
features = {}
global word_features
for word in word_features:
features[word] = (word in document_words)
return features
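# Minimal sketch (hypothetical feature words): `extract_features` marks, for
# every word in the global `word_features`, whether it occurs in the document.
def _demo_extract_features():
    global word_features
    word_features = ['good', 'bad']  # illustrative only
    return extract_features(['good', 'movie'])  # -> {'good': True, 'bad': False}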
def get_most_informative_features_with_values(clf, n=100):
# Determine the most relevant features, and display them.
cpdist = clf._feature_probdist
to_return = []
print('Most Informative Features')
for (fname, fval) in clf.most_informative_features(n):
def labelprob(l):
return cpdist[l, fname].prob(fval)
labels = sorted([l for l in clf._labels
if fval in cpdist[l, fname].samples()],
key=labelprob)
if len(labels) == 1:
continue
l0 = labels[0]
l1 = labels[-1]
if cpdist[l0, fname].prob(fval) == 0:
ratio = 'INF'
else:
ratio = float((cpdist[l1, fname].prob(fval) / cpdist[l0, fname].prob(fval)))
if l0 == 'pos':
ratio = ratio * -1
to_return.append((fname, ratio))
return to_return
def plot_most_important_words(tweets, predicts):
labels = []
new_tweets = []
for i, la in enumerate(predicts):
if la == 0:
new_tweets.append(tweets[i])
labels.append('neg')
if la == 1:
pass
if la == 2:
new_tweets.append(tweets[i])
labels.append('pos')
train = list(zip(new_tweets, labels))
tweets = []
for (words, sentiment) in train:
words_filtered = [e.lower() for e in words.split() if len(e) >= 3]
tweets.append((words_filtered, sentiment))
global word_features
word_features = get_word_features(get_words_in_tweets(tweets))
training_set = nltk.classify.apply_features(extract_features, tweets)
# training_set = nltk.classify.apply_features(word_features, tweets)
clf = nltk.NaiveBayesClassifier.train(training_set)
mostinf = get_most_informative_features_with_values(clf, 20)
# mostinf = clf.get_most_informative_features_with_values(20)
mostinf = sorted(mostinf, key=lambda x: x[1])
words = [i[0] for i in mostinf]
values = [i[1] for i in mostinf]
x_range = [i for i in range(len(words))]
fig = plt.figure(facecolor='white')
ax = fig.add_subplot(1, 1, 1)
colors = ['red' if v < 0 else 'green' for v in values]
values = sorted([abs(n) for n in values])
ax.barh(x_range, values, align='center', color=colors)
ax.set_yticks(x_range)
ax.set_yticklabels(words)
ax.set_xlabel('Word impact')
plt.title("Most informative features")
#plt.show()
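# Usage sketch (toy data): labels follow the convention used above,
# 0 = negative, 1 = neutral (ignored), 2 = positive.
def _demo_plot_most_important_words():
    tweets = ["great movie loved it", "bad plot awful acting", "plot was fine"]
    predicts = [2, 0, 1]
    plot_most_important_words(tweets, predicts)  # builds the bar chart figure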
| mit |
justincassidy/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
glenflet/ZtoRGBpy | ZtoRGBpy/_core.py | 1 | 22787 | # -*- coding: utf-8 -*-
# =================================================================================
# Copyright 2019 Glen Fletcher <mail@glenfletcher.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# All documentation this file as docstrings or comments are licensed under the
# Creative Commons Attribution-ShareAlike 4.0 International License; you may
# not use this documentation except in compliance with this License.
# You may obtain a copy of this License at
#
# https://creativecommons.org/licenses/by-sa/4.0
#
# =================================================================================
"""
ZtoRGB core module
Provides all core functions
.. moduleauthor:: Glen Fletcher <mail@glenfletcher.com>
"""
from collections.abc import Sequence, Mapping
from copy import copy
from inspect import getfullargspec
import numpy as np
class Scale(object):
"""Abstract base class for defining a scaling function for the remapping of complex values to RGB colors.
Automatically generates `repr` of subclasses (see **attributes** to define parameters from subclasses).
Attributes
----------
__args: `Sequence <collections.abc.Sequence>`
List of Positional argument values for specific subclass
__kwargs: `Mapping <collections.abc.Mapping>`
Mapping of keyword only argument values for specific subclass
See Also
--------
`custom_scale`
"""
def __init__(self):
pass
def __call__(self, value):
"""Transform value with scaling function
This method must be overridden by the subclass to define the transformation.
Parameters
----------
value: `array_like <numpy.asarray>` [...]
Array of value to be transformed, by the scaling function.
Returns
-------
scaled: `array <numpy.ndarray>` [ ``value.shape`` ]
Scaled transformation (:math:`T(v)`) of ``value`` such that :math:`0 \le |T(v)| \le 1`.
"""
raise NotImplementedError()
def ticks(self):
"""Returns a list of tick marks suitable for a colorbar
This method must be overridden by the subclass to define how ticks should be displayed on the colorbar.
Returns
-------
offsets: `Sequence <collections.abc.Sequence>` [ `float` ]
Positions in the scaled interval :math:`[0.0, 1.0]` at which tick marks should be drawn.
labels: `Sequence <collections.abc.Sequence>` [ `str` ]
Labels to be displayed for each tick mark, non `str` types will be converted to `str`.
"""
raise NotImplementedError()
def _format_param(self, arg, cls=None, name=None):
if name is not None and isinstance(name, str):
kw = name.strip() + "="
else:
kw = ""
return kw + self.format_arg(arg, cls, name)
def format_arg(self, arg, cls=None, name=None):
"""Format argument
Parameters
----------
arg: `object`
argument to be formatted
cls: `type`
class that uses the argument
name: `str`
name of argument
Returns
-------
repr: `str`
String representation of argument
"""
if isinstance(arg, float):
return "{0:g}".format(arg)
else:
return repr(arg)
def __repr__(self):
args = []
arg_class = []
kwargs = {}
kwarg_names = []
kwarg_class = {}
for subclass in type(self).__mro__:
if subclass == Scale:
break
if subclass.__init__ != subclass.__mro__[1].__init__:
spec = getfullargspec(subclass.__init__)
subclass_args = []
subclass_kwargs = {}
if hasattr(self, '_{0:s}__args'.format(subclass.__name__)):
subclass_args = getattr(self, '_{0:s}__args'.format(subclass.__name__))
if not isinstance(subclass_args, Sequence):
subclass_args = []
if hasattr(self, '_{0:s}__kwargs'.format(subclass.__name__)):
subclass_kwargs = getattr(self, '_{0:s}__kwargs'.format(subclass.__name__))
if not isinstance(subclass_args, Mapping):
subclass_kwargs = {}
num_args = len(spec.args) - 1
if spec.defaults is not None:
num_defaults = len(spec.defaults)
else:
num_defaults = 0
num_values = len(subclass_args)
if num_values + num_defaults < num_args:
raise RuntimeError("{0:s} dose not define __args "
"for non-default arguments".format(subclass.__name__))
for i in range(num_args):
if num_values > i:
args.append(subclass_args[i])
else:
args.append(spec.defaults[i - (num_args - num_defaults)])
arg_class.append(subclass)
for kwname in spec.kwonlyargs:
if kwname not in kwarg_names:
if kwname in subclass_kwargs:
kwargs[kwname] = subclass_kwargs[kwname]
elif spec.kwonlydefaults is not None and kwname in spec.kwonlydefaults:
kwargs[kwname] = spec.kwonlydefaults[kwname]
else:
raise RuntimeError("{0:s} dose not define __kwargs "
"for non-default keyword arguments".format(subclass.__name__))
kwarg_names.append(kwname)
kwarg_class[kwname] = subclass
arglist = ', '.join(self._format_param(arg, cls) for arg, cls in zip(args, arg_class))
if len(arglist) > 0 and len(kwarg_names) > 0:
arglist += ', '
arglist += ', '.join(self._format_param(kwargs[kwname], kwarg_class[kwname], kwname) for kwname in kwarg_names)
if type(self).__module__ == '__main__':
return "{0:s}({1:s})".format(type(self).__name__, arglist)
else:
return "{0:s}.{1:s}({2:s})".format(type(self).__module__, type(self).__name__, arglist)
class LinearScale(Scale):
"""Linear Scaling
Provides a simple linear transformation, mapping the interval ``[0.0, vmax]``
into the interval ``[0.0, 1.0]`` when invoked."""
_divisors = [5, 10, 20, 25]
def __init__(self, vmax=1.0):
self.__args = [vmax]
Scale.__init__(self)
self.mag = float(vmax)
def __call__(self, value):
return np.asarray(value) / self.mag
def ticks(self):
"""Returns a list of tick marks suitable for a colorbar
Generates 4 to 10 linear steps where the steps are :math:`S = 10^m \\cdot \\{ 5, 10, 20, 25 \\}` with
:math:`m \\in \\mathbb{Z}` and starting at 0 to give :math:`N` steps.
Returns
-------
offsets: `Sequence <collections.abc.Sequence>` [ `float` ]
Returns the `Sequence` :math:`0, \\frac{S}{vmax}, \\dots, \\frac{(N - 1) \\cdot S}{vmax}`
labels: `Sequence <collections.abc.Sequence>` [ `str` ]
Returns the `Sequence` :math:`0, S, \\dots, (N-1) \\cdot S`, formatted using the
`General format <formatspec>` (``'g'``).
"""
divisor = 0
dfactor = 1.0
while True:
maxsteps = np.ceil(self.mag / (LinearScale._divisors[divisor]
* dfactor))
if maxsteps < 4:
if divisor == 0:
dfactor /= 10
divisor = len(LinearScale._divisors) - 1
else:
divisor -= 1
elif maxsteps > 10:
if divisor == len(LinearScale._divisors) - 1:
dfactor *= 10
divisor = 0
else:
divisor += 1
else:
break
stepsize = LinearScale._divisors[divisor] * dfactor
steps = np.arange(stepsize, self.mag, stepsize)
offsets = steps / self.mag
steps = ["{:g}".format(step) for step in steps]
return offsets, steps
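# Usage sketch: a linear scale over [0, 250]. The numbers are illustrative only.
def _example_linear_scale():
    scale = LinearScale(250.0)
    scaled = scale([0.0, 125.0, 250.0])  # -> array([0. , 0.5, 1. ])
    offsets, labels = scale.ticks()      # ticks at 25, 50, ..., 225
    return scaled, offsets, labels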
class LogScale(Scale):
"""Logarithmic Scaling
Provides a logarithmic transformation, mapping the interval :math:`[v_{min}, v_{max}]`
into the interval :math:`[1-l_{max}, 1]`.
"""
def __init__(self, vmin=0.01, vmax=1.0, *, lmax=0.9):
self.__args = [vmin, vmax]
self.__kwargs = {'lmax': lmax}
Scale.__init__(self)
self.logmin = np.log10(vmin)
self.logmax = np.log10(vmax)
self.lightness_max = lmax
self.lightness_buf = 1.0 - lmax
self.factor = self.lightness_max/(self.logmax-self.logmin)
def __call__(self, value):
"""Transform value with scaling function
Parameters
----------
value: `array_like <numpy.asarray>` [...]
Array of values to be transformed, by the scaling function.
Returns
-------
scaled: `array <numpy.ndarray>` [ ``value.shape`` ]
Scaled transformation (:math:`T(v)`) of ``value``
Notes
-----
Performs a logarithmic transformation, mapping the interval :math:`[v_{min}, v_{max}]` into the interval
:math:`[1-l_{max}, 1]` using the transformation:
.. math::
T(v) = (1 - l_{max}) + \\frac{v \\cdot \\left( \\log_{10}(|v|) - \\log_{10}(v_{min}) \\right) \\cdot l_{max}}
{|v| \\cdot \\left( \\log_{10}(v_{max}) - \\log_{10}(v_{min}) \\right)}
"""
value = np.asarray(value)
avalue = abs(value)
return self.lightness_buf+(value*(np.log10(avalue) -
self.logmin)/avalue)*self.factor
def ticks(self):
"""Returns a list of tick marks suitable for a colorbar
Generates 3 to 6 logarithmic steps such that the steps are all powers of 10 and bound by the interval
``[vmin, vmax]``
Returns
-------
offsets: `Sequence <collections.abc.Sequence>` [ `float` ]
Returns the `Sequence` :math:`T(10^{t_{min}}), \\dots, T(10^{t_{max}})`
labels: `Sequence <collections.abc.Sequence>` [ `str` ]
Returns the `Sequence` :math:`10^{t_{min}}, \\dots, 10^{t_{max}}`, as strings using latex math,
as supported by `matplotlib`\ 's maths rendering.
"""
lmax = np.floor(self.logmax)
lmin = np.ceil(self.logmin)
logrange = int(lmax - lmin) + 1
while logrange > 6:
for n in range(5, 2, -1):
if (logrange - 1) % n == 0:
logrange = n + 1
break
else:
if self.logmax - lmax > lmin - self.logmin:
lmin += 1
else:
lmax -= 1
logrange = int(lmax - lmin) + 1
steps = np.linspace(lmin, lmax, logrange)
values = ["$10^{{{0:.0f}}}$".format(s) for s in steps]
offsets = self(10 ** steps)
return offsets, values
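# Usage sketch: a logarithmic scale spanning three decades; with the default
# lmax of 0.9 the magnitudes map onto [0.1, 1.0].
def _example_log_scale():
    scale = LogScale(vmin=0.001, vmax=1.0)
    scaled = scale([0.001, 1.0])     # -> approximately [0.1, 1.0]
    offsets, labels = scale.ticks()  # labels like "$10^{-3}$", ..., "$10^{0}$"
    return scaled, offsets, labels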
class RGBColorProfile(object):
"""
Defines a color profile in a given RGB color space by conversion factors for
red, green and blue to the Y component of the XYZ color space, i.e. the white point.
Parameters
----------
weights: `tuple` [ `float`, `float`, `float` ], optional, default: (2126.0, 7152.0, 722.0)
Color component weight triple (:math:`W_R, W_G, W_B`) for conversion to the XYZ color space
Defaults to the sRGB color space defined by IEC\ :cite:`RGBColorProfile-IECsRGB`.
gamma: `float`, optional, defaults: 0.5
Gamma (:math:`\gamma`) correction.
"""
def __init__(self, weights=(2126.0, 7152.0, 722.0), gamma=0.5):
self.weights = weights
self.gamma = gamma
def get_ratios(self):
"""Returns the relative ratios for red and blue
Returns
-------
red_ratio: `float`
Relative ratio of red (:math:`K_R`)
blue_ratio: `float`
Relative ratio of blue (:math:`K_B`)
Notes
-----
Ratios for red (:math:`K_R`) and blue (:math:`K_B`) are defined by:
.. math::
K_R =& \\frac{W_R}{W_R + W_G + W_B}\\\\
K_B =& \\frac{W_B}{W_R + W_G + W_B}
"""
red_ratio = self.weights[0] / (self.weights[0] +
self.weights[1] + self.weights[2])
blue_ratio = self.weights[2] / (self.weights[0] +
self.weights[1] + self.weights[2])
return red_ratio, blue_ratio
# noinspection PyPep8Naming
def remove_gamma(self, RGB):
"""Removes gamma correction from color
Parameters
----------
RGB : `array_like <numpy.asarray>` [...]
gamma corrected RGB color values
Returns
-------
rgb : `array <numpy.ndarray>` [ ``RGB.shape`` ]
non-gamma corrected RGB color values
Notes
-----
Equivalent to :math:`{rgb} = {RGB}^{\\frac{1}{\\gamma}}`
"""
return np.asarray(RGB) ** (1.0 / self.gamma)
def apply_gamma(self, rgb):
"""Applies gamma correction to color
Parameters
----------
rgb : `array_like <numpy.asarray>` [...]
non-gamma corrected RGB color values
Returns
-------
RGB : `array <numpy.asarray>` [ ``rgb.shape`` ]
gamma corrected RGB color values
Notes
-----
Equivalent to :math:`{RGB} = {rgb}^{\\gamma}`"""
return np.asarray(rgb) ** self.gamma
def get_limits(self):
"""Returns :math:`U_{max}` and :math:`V_{max}`
Returns
-------
Vmax: `float`
V channel magnitude limit (:math:`V_{max}`)
Umax: `float`
U channel magnitude limit (:math:`U_{max}`)
Notes
-----
:math:`U_{max}` and :math:`V_{max}` represent the limits of the UV color space,
as defined by\ :cite:`RGBColorProfile-fletcher2019`:
.. math::
V_{max} =& \\frac{\\sqrt[3]{K_R}}{\\sqrt[3]{K_R} + \\sqrt[3]{K_B}}\\\\
U_{max} =& \\frac{\\sqrt[3]{K_B}}{\\sqrt[3]{K_R} + \\sqrt[3]{K_B}}
"""
red_ratio, blue_ratio = self.get_ratios()
blue_ratio_cuberoot = blue_ratio ** (1 / 3.0)
red_ratio_cuberoot = red_ratio ** (1 / 3.0)
# Get U, V trans_matrix based on color ratios
vmax = red_ratio_cuberoot / (red_ratio_cuberoot + blue_ratio_cuberoot)
umax = blue_ratio_cuberoot / (red_ratio_cuberoot + blue_ratio_cuberoot)
return umax, vmax
def get_transform(self):
"""Returns the UV to RGB transformation matrix :math:`\\mathbf{K_Q}`.
Returns
-------
transformation: `array <numpy.ndarray>` [3, 2, dtype = `float`]
transformation matrix :math:`\\mathbf{K_Q}`
Notes
-----
The transformation matrix :math:`\\mathbf{K_Q}` is defined by\ :cite:`RGBColorProfile-fletcher2019`:
.. math::
\\mathbf{K_Q} &=
\\left[\\begin{matrix}
0 & \\frac{1-K_R}{V_{max}} \\\\
\\frac{K_B\\left( 1-K_B \\right)}{U_{max}\\left( K_B+K_R-1 \\right)} &
\\frac{K_R\\left( 1-K_R \\right)}{V_{max}\\left( K_B+K_R-1 \\right)}\\\\
\\frac{1-K_B}{U_{max}} & 0 \\\\
\\end{matrix}\\right]
"""
red_ratio, blue_ratio = self.get_ratios()
umax, vmax = self.get_limits()
trans_matrix = np.zeros((3, 2))
trans_matrix[0, 1] = (1 - red_ratio / vmax)
trans_matrix[1, 0] = (blue_ratio * (1 - blue_ratio) /
(umax * (blue_ratio + red_ratio - 1)))
trans_matrix[1, 1] = (red_ratio * (1 - red_ratio) /
(vmax * (blue_ratio + red_ratio - 1)))
trans_matrix[2, 0] = (1 - blue_ratio / umax)
return trans_matrix
def get_chroma_limit(self):
"""Return the limiting chroma
Returns
-------
chroma_limit: `float`
Limiting Chroma for the colorspace
Notes
-----
The limiting chroma is computed from the transformation matrix
as described in Fletcher 2019\ :cite:`RGBColorProfile-fletcher2019`.
"""
trans_matrix = self.get_transform()
chroma_limit = [trans_matrix[0, 1], trans_matrix[2, 0]]
kg2 = (trans_matrix[1, 0] ** 2 + trans_matrix[1, 1] ** 2)
chroma_limit.append(- ((trans_matrix[1, 0] + np.sqrt(kg2)) * kg2 /
(kg2 + trans_matrix[1, 0] * np.sqrt(kg2))))
chroma_limit.append(- ((trans_matrix[1, 0] - np.sqrt(kg2)) * kg2 /
(kg2 - trans_matrix[1, 0] * np.sqrt(kg2))))
return 1 / max(chroma_limit)
def __repr__(self):
return "{0:s}.{1:s}({2!r:s}, {3:g})".format(type(self).__module__,
type(self).__name__,
self.weights, self.gamma)
# pylint: disable=C0103
# These constants should start with lowercase s, as this is the correct
# usage, for writing sRGB
sRGB_HIGH = RGBColorProfile((2126.0, 7152.0, 722.0), 0.5)
sRGB_LOW = RGBColorProfile((2126.0, 7152.0, 722.0), 1)
sRGB = copy(sRGB_HIGH)
# pylint: enable=C0103
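# Illustrative sketch: inspecting the sRGB profile defined above. The values
# follow from the (2126.0, 7152.0, 722.0) weight triple.
def _example_srgb_profile():
    umax, vmax = sRGB.get_limits()          # UV colorspace limits
    K_Q = sRGB.get_transform()              # 3x2 UV -> RGB transformation matrix
    chroma_limit = sRGB.get_chroma_limit()  # limiting chroma of the colorspace
    return umax, vmax, K_Q, chroma_limit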
def remap(data, scale=None, profile=None, return_int=False, return_metadata=False, **kwargs):
"""Converts an array of complex values to RGB triples
For 2d arrays of complex numbers the returned array is suitable
for passing to `pyplot.imshow <matplotlib.pyplot.imshow>` from matplotlib.
Parameters
----------
data : `array_like <numpy.asarray>` [...]
Complex input data.
scale : {`Scale`, 'linear', 'log'}, optional, default: `None`
Use to define the magnitude scaling of the data. Data is transformed by this object to a interval
of :math:`[0.0, 1.0]`
If passed an instance of `Scale`, then this instance is use to scale the data.
If passed a subclass of `Scale`, an instance is then created of this subclass, as
``scale([min(abs(data)),] max(abs(data)), **kwargs)``.
'linear' or `None` are equivalent to passing `LinearScale`, while 'log' is equivalent to passing `LogScale`
profile: {`RGBColorProfile`, 'srgb', 'srgb_high', 'srgb_low'}, optional, default: `None`
ColorProfile representing the RGB colorspace to convert the complex data to.
'srgb' or `None` are equivalent to passing `sRGB`, while 'srgb_high' or 'srgb_low' are respectively
equivalent to passing `sRGB_HIGH` or `sRGB_LOW`.
return_int : `bool`, optional, default: `False`
If true, returns integers in the interval :math:`[0, 255]`
rather than floats in the interval :math:`[0.0, 1.0]`.
return_metadata : `bool`, optional, default: `False`
Return the scale and profile instance used to generate the mapping.
Other Parameters
----------------
**kwargs :
These parameters are passed to the ``scale`` class when creating an automatic instance for scaling.
Returns
-------
rgb : `array <numpy.ndarray>` [ ``data.shape``, 3]
Array containing RGB color values with the last dimension representing the RGB triplets.
If ``return_int`` is `False` then the values are floating point in the interval :math:`[0.0, 1.0]`.
If ``return_int`` is `True` then the values are integers in the interval :math:`[0, 255]`.
scale : `Scale`
Present only if ``return_metadata`` = `True`. The actual `Scale` instance used to generate the mapping.
profile : `RGBColorProfile`
Present only if ``return_metadata`` = `True`. The actual `RGBColorProfile`
instance used to generate the mapping.
"""
data = np.asarray(data, complex)
if profile is None or isinstance(profile, str) and profile.lower() == 'srgb':
profile = sRGB
elif isinstance(profile, str) and profile.lower() == 'srgb_high':
profile = sRGB_HIGH
elif isinstance(profile, str) and profile.lower() == 'srgb_low':
profile = sRGB_LOW
if not isinstance(profile, RGBColorProfile):
raise ValueError("profile can't be converted to an instance of RGBColorProfile.")
if scale is None or isinstance(scale, str) and scale.lower() == 'linear':
scale = LinearScale
elif isinstance(scale, str) and scale.lower() == 'log':
scale = LogScale
if isinstance(scale, type) and issubclass(scale, Scale):
spec = getfullargspec(scale)
num_args = len(spec.args) - 1
if num_args == 0:
scale = scale(**kwargs)
elif num_args == 1:
scale = scale(np.nanmax(np.abs(data)), **kwargs)
elif num_args > 1:
scale = scale(np.nanmin(np.abs(data)), np.nanmax(np.abs(data)), **kwargs)
if not isinstance(scale, Scale):
raise ValueError("scale can't be converted to an instance of Scale.")
trans_matrix = profile.get_transform()
chroma_limit = profile.get_chroma_limit()
lightness_cutoff = (4 ** (1 / 3.0)) / 2
data = np.asarray(scale(data), complex)
nan = np.isnan(data)
data[nan] = 0
magnitude = np.abs(data).reshape(*(data.shape + (1,)))
data = data.view(float).reshape(*(data.shape + (2,)))
luminance = (1-(1-lightness_cutoff)*np.clip(magnitude, 0, 1))**3
chrome = chroma_limit*(1-luminance)
rbg = np.einsum('qz,...z->...q', trans_matrix, data)
rbg /= (magnitude > 0)*magnitude + (magnitude == 0)
rbg *= chrome
rbg += luminance
rbg = profile.remove_gamma(rbg)
rbg[nan, :] = 0
if return_int:
ret = (rbg * 255).astype('i8')
else:
ret = rbg
if return_metadata:
return ret, scale, profile
else:
return ret
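# Usage sketch (synthetic data): remapping a complex field to RGB for display.
# The grid size and the choice of a linear scale are illustrative only.
def _example_remap():
    x, y = np.meshgrid(np.linspace(-1, 1, 256), np.linspace(-1, 1, 256))
    z = x + 1j * y
    rgb, scale, profile = remap(z, scale='linear', return_metadata=True)
    # rgb has shape (256, 256, 3) and can be passed to matplotlib's imshow
    return rgb, scale, profile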
| mit |
silky/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/LPC.py | 24 | 1191 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
from scipy.fftpack import fft, ifft
import essentia.standard as ess
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
lpc = ess.LPC(order=14)
N= 512
(fs, x) = UF.wavread('../../../sounds/soprano-E4.wav')
first = 20000
last = first+N
x1 = x[first:last]
X = fft(hamming(N)*x1)
mX = 20 * np.log10(abs(X[:N//2]))
coeff = lpc(x1)
Y = fft(coeff[0], N)
mY = 20 * np.log10(abs(Y[:N//2]))
plt.figure(1, figsize=(9, 5))
plt.subplot(2,1,1)
plt.plot(np.arange(first, last)/float(fs), x[first:last], 'b', lw=1.5)
plt.axis([first/float(fs), last/float(fs), min(x[first:last]), max(x[first:last])])
plt.title('x (soprano-E4.wav)')
plt.subplot(2,1,2)
plt.plot(np.arange(0, fs/2.0, fs/float(N)), mX-max(mX), 'r', lw=1.5, label="mX")
plt.plot(np.arange(0, fs/2.0, fs/float(N)), -mY-max(-mY)-3, 'k', lw=1.5, label="mY")
plt.legend()
plt.axis([0, fs/2, -60, 3])
plt.title('mX + mY (LPC approximation)')
plt.tight_layout()
plt.savefig('LPC.png')
plt.show()
| agpl-3.0 |
witgo/spark | python/pyspark/sql/pandas/conversion.py | 4 | 21163 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from collections import Counter
from pyspark.rdd import _load_from_socket
from pyspark.sql.pandas.serializers import ArrowCollectSerializer
from pyspark.sql.types import IntegralType
from pyspark.sql.types import ByteType, ShortType, IntegerType, LongType, FloatType, \
DoubleType, BooleanType, MapType, TimestampType, StructType, DataType
from pyspark.traceback_utils import SCCallSiteSync
class PandasConversionMixin(object):
"""
Mix-in for the conversion from Spark to pandas. Currently, only :class:`DataFrame`
can use this class.
"""
def toPandas(self):
"""
Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``.
This is only available if Pandas is installed and available.
.. versionadded:: 1.3.0
Notes
-----
This method should only be used if the resulting Pandas's :class:`DataFrame` is
expected to be small, as all the data is loaded into the driver's memory.
Usage with spark.sql.execution.arrow.pyspark.enabled=True is experimental.
Examples
--------
>>> df.toPandas() # doctest: +SKIP
age name
0 2 Alice
1 5 Bob
"""
from pyspark.sql.dataframe import DataFrame
assert isinstance(self, DataFrame)
from pyspark.sql.pandas.utils import require_minimum_pandas_version
require_minimum_pandas_version()
import numpy as np
import pandas as pd
timezone = self.sql_ctx._conf.sessionLocalTimeZone()
if self.sql_ctx._conf.arrowPySparkEnabled():
use_arrow = True
try:
from pyspark.sql.pandas.types import to_arrow_schema
from pyspark.sql.pandas.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
to_arrow_schema(self.schema)
except Exception as e:
if self.sql_ctx._conf.arrowPySparkFallbackEnabled():
msg = (
"toPandas attempted Arrow optimization because "
"'spark.sql.execution.arrow.pyspark.enabled' is set to true; however, "
"failed by the reason below:\n %s\n"
"Attempting non-optimization as "
"'spark.sql.execution.arrow.pyspark.fallback.enabled' is set to "
"true." % str(e))
warnings.warn(msg)
use_arrow = False
else:
msg = (
"toPandas attempted Arrow optimization because "
"'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has "
"reached the error below and will not continue because automatic fallback "
"with 'spark.sql.execution.arrow.pyspark.fallback.enabled' has been set to "
"false.\n %s" % str(e))
warnings.warn(msg)
raise
# Try to use Arrow optimization when the schema is supported and the required version
# of PyArrow is found, if 'spark.sql.execution.arrow.pyspark.enabled' is enabled.
if use_arrow:
try:
from pyspark.sql.pandas.types import _check_series_localize_timestamps, \
_convert_map_items_to_dict
import pyarrow
# Rename columns to avoid duplicated column names.
tmp_column_names = ['col_{}'.format(i) for i in range(len(self.columns))]
batches = self.toDF(*tmp_column_names)._collect_as_arrow()
if len(batches) > 0:
table = pyarrow.Table.from_batches(batches)
# Pandas DataFrame created from PyArrow uses datetime64[ns] for date type
# values, but we should use datetime.date to match the behavior with when
# Arrow optimization is disabled.
pdf = table.to_pandas(date_as_object=True)
# Rename back to the original column names.
pdf.columns = self.columns
for field in self.schema:
if isinstance(field.dataType, TimestampType):
pdf[field.name] = \
_check_series_localize_timestamps(pdf[field.name], timezone)
elif isinstance(field.dataType, MapType):
pdf[field.name] = \
_convert_map_items_to_dict(pdf[field.name])
return pdf
else:
return pd.DataFrame.from_records([], columns=self.columns)
except Exception as e:
# We might have to allow fallback here as well but multiple Spark jobs can
# be executed. So, simply fail in this case for now.
msg = (
"toPandas attempted Arrow optimization because "
"'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has "
"reached the error below and can not continue. Note that "
"'spark.sql.execution.arrow.pyspark.fallback.enabled' does not have an "
"effect on failures in the middle of "
"computation.\n %s" % str(e))
warnings.warn(msg)
raise
# Below is toPandas without Arrow optimization.
pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns)
column_counter = Counter(self.columns)
dtype = [None] * len(self.schema)
for fieldIdx, field in enumerate(self.schema):
# For duplicate column name, we use `iloc` to access it.
if column_counter[field.name] > 1:
pandas_col = pdf.iloc[:, fieldIdx]
else:
pandas_col = pdf[field.name]
pandas_type = PandasConversionMixin._to_corrected_pandas_type(field.dataType)
# SPARK-21766: if an integer field is nullable and has null values, it can be
# inferred by pandas as float column. Once we convert the column with NaN back
# to integer type e.g., np.int16, we will hit exception. So we use the inferred
# float type, not the corrected type from the schema in this case.
if pandas_type is not None and \
not(isinstance(field.dataType, IntegralType) and field.nullable and
pandas_col.isnull().any()):
dtype[fieldIdx] = pandas_type
# Ensure we fall back to nullable numpy types, even when whole column is null:
if isinstance(field.dataType, IntegralType) and pandas_col.isnull().any():
dtype[fieldIdx] = np.float64
if isinstance(field.dataType, BooleanType) and pandas_col.isnull().any():
dtype[fieldIdx] = np.object
df = pd.DataFrame()
for index, t in enumerate(dtype):
column_name = self.schema[index].name
# For duplicate column name, we use `iloc` to access it.
if column_counter[column_name] > 1:
series = pdf.iloc[:, index]
else:
series = pdf[column_name]
if t is not None:
series = series.astype(t, copy=False)
# `insert` API makes copy of data, we only do it for Series of duplicate column names.
# `pdf.iloc[:, index] = pdf.iloc[:, index]...` doesn't always work because `iloc` could
# return a view or a copy depending by context.
if column_counter[column_name] > 1:
df.insert(index, column_name, series, allow_duplicates=True)
else:
df[column_name] = series
pdf = df
if timezone is None:
return pdf
else:
from pyspark.sql.pandas.types import _check_series_convert_timestamps_local_tz
for field in self.schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
pdf[field.name] = \
_check_series_convert_timestamps_local_tz(pdf[field.name], timezone)
return pdf
@staticmethod
def _to_corrected_pandas_type(dt):
"""
When converting Spark SQL records to Pandas :class:`DataFrame`, the inferred data type
may be wrong. This method gets the corrected data type for Pandas if that type may be
inferred incorrectly.
"""
import numpy as np
if type(dt) == ByteType:
return np.int8
elif type(dt) == ShortType:
return np.int16
elif type(dt) == IntegerType:
return np.int32
elif type(dt) == LongType:
return np.int64
elif type(dt) == FloatType:
return np.float32
elif type(dt) == DoubleType:
return np.float64
elif type(dt) == BooleanType:
return np.bool
elif type(dt) == TimestampType:
return np.datetime64
else:
return None
def _collect_as_arrow(self):
"""
Returns all records as a list of ArrowRecordBatches, pyarrow must be installed
and available on driver and worker Python environments.
This is an experimental feature.
"""
from pyspark.sql.dataframe import DataFrame
assert isinstance(self, DataFrame)
with SCCallSiteSync(self._sc):
port, auth_secret, jsocket_auth_server = self._jdf.collectAsArrowToPython()
# Collect list of un-ordered batches where last element is a list of correct order indices
try:
results = list(_load_from_socket((port, auth_secret), ArrowCollectSerializer()))
finally:
# Join serving thread and raise any exceptions from collectAsArrowToPython
jsocket_auth_server.getResult()
# Separate RecordBatches from batch order indices in results
batches = results[:-1]
batch_order = results[-1]
# Re-order the batch list using the correct order
return [batches[i] for i in batch_order]
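# Illustrative sketch (comments only, not part of the PySpark API surface):
# the record batches returned above can be assembled into a pandas DataFrame
# the same way toPandas does it internally, assuming `df` is a Spark DataFrame:
#
#     import pyarrow
#     batches = df._collect_as_arrow()
#     table = pyarrow.Table.from_batches(batches)
#     pdf = table.to_pandas(date_as_object=True)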
class SparkConversionMixin(object):
"""
Mix-in for the conversion from pandas to Spark. Currently, only :class:`SparkSession`
can use this class.
"""
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
from pyspark.sql import SparkSession
assert isinstance(self, SparkSession)
from pyspark.sql.pandas.utils import require_minimum_pandas_version
require_minimum_pandas_version()
timezone = self._wrapped._conf.sessionLocalTimeZone()
# If no schema supplied by user then get the names of columns only
if schema is None:
schema = [str(x) if not isinstance(x, str) else
(x.encode('utf-8') if not isinstance(x, str) else x)
for x in data.columns]
if self._wrapped._conf.arrowPySparkEnabled() and len(data) > 0:
try:
return self._create_from_pandas_with_arrow(data, schema, timezone)
except Exception as e:
if self._wrapped._conf.arrowPySparkFallbackEnabled():
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.pyspark.enabled' is set to true; however, "
"failed by the reason below:\n %s\n"
"Attempting non-optimization as "
"'spark.sql.execution.arrow.pyspark.fallback.enabled' is set to "
"true." % str(e))
warnings.warn(msg)
else:
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has "
"reached the error below and will not continue because automatic "
"fallback with 'spark.sql.execution.arrow.pyspark.fallback.enabled' "
"has been set to false.\n %s" % str(e))
warnings.warn(msg)
raise
data = self._convert_from_pandas(data, schema, timezone)
return self._create_dataframe(data, schema, samplingRatio, verifySchema)
def _convert_from_pandas(self, pdf, schema, timezone):
"""
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame
Returns
-------
list
list of records
"""
from pyspark.sql import SparkSession
assert isinstance(self, SparkSession)
if timezone is not None:
from pyspark.sql.pandas.types import _check_series_convert_timestamps_tz_local
copied = False
if isinstance(schema, StructType):
for field in schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
if s is not pdf[field.name]:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[field.name] = s
else:
for column, series in pdf.iteritems():
s = _check_series_convert_timestamps_tz_local(series, timezone)
if s is not series:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[column] = s
# Convert pandas.DataFrame to list of numpy records
np_records = pdf.to_records(index=False)
# Check if any columns need to be fixed for Spark to infer properly
if len(np_records) > 0:
record_dtype = self._get_numpy_record_dtype(np_records[0])
if record_dtype is not None:
return [r.astype(record_dtype).tolist() for r in np_records]
# Convert list of numpy records to python lists
return [r.tolist() for r in np_records]
def _get_numpy_record_dtype(self, rec):
"""
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
the dtypes of fields in a record so they can be properly loaded into Spark.
Parameters
----------
rec : numpy.record
a numpy record to check field dtypes
Returns
-------
numpy.dtype
corrected dtype for a numpy.record or None if no correction needed
"""
import numpy as np
cur_dtypes = rec.dtype
col_names = cur_dtypes.names
record_type_list = []
has_rec_fix = False
for i in range(len(cur_dtypes)):
curr_type = cur_dtypes[i]
# If type is a datetime64 timestamp, convert to microseconds
# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
if curr_type == np.dtype('datetime64[ns]'):
curr_type = 'datetime64[us]'
has_rec_fix = True
record_type_list.append((str(col_names[i]), curr_type))
return np.dtype(record_type_list) if has_rec_fix else None
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
"""
Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
data types will be used to coerce the data in Pandas to Arrow conversion.
"""
from pyspark.sql import SparkSession
from pyspark.sql.dataframe import DataFrame
assert isinstance(self, SparkSession)
from pyspark.sql.pandas.serializers import ArrowStreamPandasSerializer
from pyspark.sql.types import TimestampType
from pyspark.sql.pandas.types import from_arrow_type, to_arrow_type
from pyspark.sql.pandas.utils import require_minimum_pandas_version, \
require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
import pyarrow as pa
# Create the Spark schema from list of names passed in with Arrow types
if isinstance(schema, (list, tuple)):
arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)
struct = StructType()
for name, field in zip(schema, arrow_schema):
struct.add(name, from_arrow_type(field.type), nullable=field.nullable)
schema = struct
# Determine arrow types to coerce data when creating batches
if isinstance(schema, StructType):
arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
elif isinstance(schema, DataType):
raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
else:
# Any timestamps must be coerced to be compatible with Spark
arrow_types = [to_arrow_type(TimestampType())
if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
for t in pdf.dtypes]
# Slice the DataFrame to be batched
step = -(-len(pdf) // self.sparkContext.defaultParallelism) # round int up
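        # Note: -(-a // b) is integer ceiling division, so each slice holds at
        # most `step` rows and the slices together cover the whole DataFrame.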
pdf_slices = (pdf.iloc[start:start + step] for start in range(0, len(pdf), step))
# Create list of Arrow (columns, type) for serializer dump_stream
arrow_data = [[(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)]
for pdf_slice in pdf_slices]
jsqlContext = self._wrapped._jsqlContext
safecheck = self._wrapped._conf.arrowSafeTypeConversion()
col_by_name = True # col by name only applies to StructType columns, can't happen here
ser = ArrowStreamPandasSerializer(timezone, safecheck, col_by_name)
def reader_func(temp_filename):
return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)
def create_RDD_server():
return self._jvm.ArrowRDDServer(jsqlContext)
# Create Spark DataFrame from Arrow stream file, using one batch per partition
jrdd = self._sc._serialize_to_jvm(arrow_data, ser, reader_func, create_RDD_server)
jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
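# Illustrative usage sketch (hypothetical session): the Arrow path above is
# normally reached through SparkSession.createDataFrame on a pandas.DataFrame
# when Arrow conversion is enabled; the config key assumes Spark 3.x naming.
#
#   import pandas as pd
#   from pyspark.sql import SparkSession
#
#   spark = SparkSession.builder.master("local[2]").getOrCreate()
#   spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
#   pdf = pd.DataFrame({"id": [1, 2, 3],
#                       "ts": pd.to_datetime(["2020-01-01"] * 3)})
#   sdf = spark.createDataFrame(pdf)  # may dispatch to _create_from_pandas_with_arrow
#   sdf.show()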
def _test():
import doctest
from pyspark.sql import SparkSession
import pyspark.sql.pandas.conversion
globs = pyspark.sql.pandas.conversion.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.pandas.conversion tests")\
.getOrCreate()
globs['spark'] = spark
(failure_count, test_count) = doctest.testmod(
pyspark.sql.pandas.conversion, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
demis001/biopandas | tests/testbiopandas.py | 2 | 4473 | import mock
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import unittest
#from bioframes import bioframes as bf
from bioframes import sequenceframes
import sys
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_index_equal
from numpy.testing import assert_array_equal, assert_array_almost_equal
from bioframes import bioframes
import numpy as np
from operator import itemgetter
if sys.version[0] == '2':
import __builtin__ as builtins # pylint:disable=import-error
else:
import builtins # pylint:disable=import-error
def mock_file(func, read_data, *args, **kwargs):
with mock.patch.object(builtins, 'open', mock.mock_open(read_data=read_data)): #, create = True) as m:
with open('_') as handle:
return func(handle, *args, **kwargs)
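# mock_file patches builtins.open so that `func` is handed a file object whose
# contents are `read_data`, letting parser functions be exercised without
# touching the filesystem. Hypothetical usage:
#   df = mock_file(sequenceframes.load_fastq, read_data=fastq_text)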
class TestFastq(unittest.TestCase):
def setUp(self):
self.r = SeqRecord(Seq("ACGTA"), id="Test", letter_annotations = {"phred_quality":[50, 40, 30, 20, 10]})
self.fastq_string = '''@read1
TTTCGAATC
+
FFFFFFFFF
@read2
CTTCGATC
+
AFFDDDDD
@read3
CCGATCAA
+
FF@@@F@F
@read4
TTTCGAATC
+
FFFFFFFFF
'''
        with open('tmp.fq', 'w') as tmp:
            tmp.write(self.fastq_string)
fq = sequenceframes.fqframe(open('tmp.fq'))
#self.df = sequenceframes.load_fastq(open('tmp.fq'))
self.df = bioframes.makeframe(fq)
# self.df = fq.load_fastq()
# r_dict = fq.get_row(self.r)
# self.series = pd.Series(r_dict)
#r_dict = sequenceframes.get_row(self.r)
#TODO: somehow SeqIO broke when I tried to mock_open
# with mock.patch.object(builtins, 'open', mock.mock_open(read_data=self.fastq_string)): #, create = True) as m:
# with open('_') as handle:
# self.df = bf.load_fastq(handle)
#self.df = mock_file(bf.load_fastq, read_data=self.fastq_string)
def test_sanger_quality_error(self):
expected = np.array([.1, .01, .001, .0001, .00001][::-1])
assert_array_almost_equal(self.series['error'], expected)
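    # Sanger/phred quality Q encodes an error probability of 10**(-Q/10); the
    # record in setUp has qualities [50, 40, 30, 20, 10], hence the expected
    # errors 1e-5, 1e-4, 1e-3, 1e-2, 1e-1.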
def test_sanger_quality_string(self):
self.assertEquals(self.series['quality'], 'SI?5+')
def test_data_frame_lengths(self):
        expected_len = len(self.fastq_string.split('\n')) // 4
self.assertEquals( expected_len, len(self.df))
def test_dupe_reads(self):
dupe_reads = self.df[self.df.seq == 'TTTCGAATC']
self.assertEquals(2, len(dupe_reads))
def test_dataframe_index(self):
expected_index = pd.Index(['read1', 'read2', 'read3', 'read4'])
assert_index_equal(expected_index, self.df.index)
def test_dataframe_contents(self):
columns = itemgetter( 'description','seq', 'quality', 'qual_ints', 'error')
qlen=len( 'TTTCGAATC')
expected1 = pd.Series(['read1', 'TTTCGAATC', 'FFFFFFFFF', np.array([37]*qlen), np.array( [0.0001]*qlen)])
expected3 = pd.Series(['read4', 'TTTCGAATC', 'FFFFFFFFF', np.array([37]*qlen), np.array( [0.0001]*qlen)])
r1, r4, r2 = map(pd.Series, [columns(self.df.ix['read1']), columns(self.df.ix['read4']), columns(self.df.ix['read2'])])
assert_series_equal( expected1, r1)
assert_series_equal( expected3, r4)
expected_error = np.array( [0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001])
expected_qual_ints = np.array( [32, 37, 37, 35, 35, 35, 35, 35])
expected2 = pd.Series(['read2', 'CTTCGATC', 'AFFDDDDD', expected_qual_ints, expected_error])
assert_series_equal(expected2, r2)
# def test_join_non_unique_dataframes(self):
# '''
# df1 and df2 share an index with duplicates, check that it is aligned correctly
# '''
# rows1 = [('A', 'A1'), ('B', 'B1'), ('A', 'A2'), ('C', 'C1')]
# rows2 = [('A', '0A', False), ('B', '0B', True), ('A', '00A', False), ('C', '00C', True)]
# self.df1, self.df2 = map(make_df_index0, (rows1, rows2))
# self.df1.columns = ['0', '1']
# self.df2.columns = ['0', '1', '2']
# self.df1, self.df2 = self.df1.set_index('0'), self.df2.set_index('0')
# result = a2f.join_non_unique_dataframes(self.df1, self.df2)
# expected = pd.DataFrame(
# [('A', 0, 'A1', '0A', True), ('B', 0, 'B1', '0B', True),
# ('A', 1, 'A2', '00A', False), ('C', 0, 'C1', '00C', True)]
# ).set_index(0).set_index(1, append=True)
# assert_frame_equal(result, expected)
| gpl-2.0 |
raghavrv/scikit-learn | sklearn/tests/test_cross_validation.py | 79 | 47914 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of the train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 3]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
    # Check that, despite the warning, the folds are still computed even
    # though not all the classes are necessarily represented on each
    # side of every split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Check that errors are raised if all n_labels for individual
# classes are less than n_folds.
y = [3, 3, -1, -1, 2]
assert_raises(ValueError, cval.StratifiedKFold, y, 3)
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
error_string = ("k-fold cross validation requires at least one"
" train / test split")
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 0)
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 1)
    # When n is not an integer:
    assert_raises(ValueError, cval.KFold, 2.5, 2)
    # When n_folds is not an integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact be computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the
    # non-shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
test_size = np.ceil(0.33 * len(y))
train_size = len(y) - test_size
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train],
return_inverse=True)[1]) /
float(len(y[train])))
p_test = (np.bincount(np.unique(y[test],
return_inverse=True)[1]) /
float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(len(train) + len(test), y.size)
assert_equal(len(train), train_size)
assert_equal(len(test), test_size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with an
    # equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
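        # The acceptance threshold scales with 1/n_splits, which appears to keep
        # the binomial check tolerant of rare draws when many counts are tested.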
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
labels = [0, 1, 2, 3] * 3 + [4, 5] * 5
splits = cval.StratifiedShuffleSplit(labels, n_iter=1,
test_size=0.5, random_state=0)
train, test = next(iter(splits))
assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to the
    # zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
neg_mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="neg_mean_squared_error")
expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
with ignore_warnings(category=ConvergenceWarning):
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
with ignore_warnings(category=ConvergenceWarning):
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
lsiemens/lsiemens.github.io | theory/fractional_calculus/code/generateanim2dfc.py | 1 | 2823 | import numpy
import fc2dpy
from scipy import special
from matplotlib import pyplot
from matplotlib.animation import FuncAnimation
resolution = 64 # 256
order = 64 # 128
PD = fc2dpy.polydisk(0, 0, 1, 1, resolution=resolution)
PD.set_parameterization(1.0 + 0.0j, 1.0 + 0.0j)
def encoding(a):
# return 1.0/fc2dpy.Gamma(-a + 1) ####-------- NICE --------####
beta = 2.0
return (a)**beta
def Log(x, a, order=None):
return (x**a*(numpy.log(x) + special.digamma(1.0) - special.digamma(a + 1))/fc2dpy.Gamma(a + 1))
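# The closed form above appears to be the Riemann-Liouville fractional
# integral/derivative of ln(x) of (possibly non-integer) order a:
#   D^{-a} ln(x) = x**a * (ln(x) + psi(1) - psi(a + 1)) / Gamma(a + 1)
# where psi is the digamma function.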
def func_inverse_a(x, a, order=None):
"""
equivelent to function encoding (a)**-1
"""
import mpmath
gammaincomplete = numpy.frompyfunc(mpmath.gammainc, 2, 1)
output = (-x)**a*(gammaincomplete(-a, -x) - special.gamma(-numpy.array(a, dtype=numpy.complex128)))
if hasattr(x, "__len__") or hasattr(a, "__len__"):
print("array")
return numpy.array(output, dtype=fc2dpy.array_dtype)
else:
print("complex")
return numpy.complex(output)
T = fc2dpy.taylor_encoding(encoding, PD, "A")
fd = fc2dpy.generator_2d_from_1d(T, order=order)
#fd.function = Log
#fd.function_label = "ln(x)"
#fd.function = func_inverse_a
#fd.function_label = "X^a GAMMAINC"
pyplot.ylim(-10, 10)
T.graph_encoding()
fd.vmax = 1
fd.vmin = -1
fd.graph1d_x()
fd.graph1d_a(x=0.0)
fd.graph2d()
fps = 30
number_of_frames = 30*6 #30*60
data = []
for i in range(number_of_frames):
print(i, number_of_frames)
speed = 2*numpy.pi/number_of_frames
fd.polydisk.set_parameterization(1.0, numpy.cos(speed*i) + 1.0j*numpy.sin(speed*i))
# fd.polydisk.set_parameterization(numpy.cos(speed*i) + 1.0j*numpy.sin(speed*i), 1.0)
# temp = numpy.log(numpy.abs(fd.function(fd.polydisk.Grid_x, fd.polydisk.Grid_a, fd.order))) #1
# temp = numpy.angle(fd.function(fd.polydisk.Grid_x, fd.polydisk.Grid_a, fd.order)) #2
temp = fd.function(fd.polydisk.Grid_x, fd.polydisk.Grid_a, fd.order) #2
data.append(temp)
i = 0
vmax, vmin = numpy.nanmax(numpy.abs(data)), numpy.nanmin(numpy.abs(data))
print(vmax, vmin)
fig = pyplot.figure()
def foo(i):
pyplot.clf()
def mapping(data, vmin, vmax):
out = numpy.zeros(shape=data.shape + (3,))
out[:,:,0] = numpy.clip((numpy.real(data) - vmin)/(vmax - vmin), 0, 1)
out[:,:,1] = numpy.clip((numpy.imag(data) - vmin)/(vmax - vmin), 0, 1)
out[:,:,2] = 0.0*numpy.real(data)
return out
# pyplot.imshow(data[i%len(data)], vmin=-vmax, vmax=vmax, interpolation="bicubic", cmap="magma") #1
# pyplot.imshow(data[i%len(data)], vmin=-vmax, vmax=vmax, interpolation="nearest", cmap="hsv") #2
pyplot.imshow(mapping(data[i%len(data)], vmin, vmax), interpolation="nearest") #2
ani = FuncAnimation(fig, foo, frames=number_of_frames, interval=1000/fps)
ani.save("anim.mp4")
pyplot.show()
| mit |